/*
 * bsg.c - block layer implementation of the sg v4 interface
 *
 * Copyright (C) 2004 Jens Axboe <axboe@suse.de> SUSE Labs
 * Copyright (C) 2004 Peter M. Jones <pjones@redhat.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License version 2.  See the file "COPYING" in the main directory of this
 * archive for more details.
 *
 */
/*
 * TODO
 *	- Should this get merged, block/scsi_ioctl.c will be migrated into
 *	  this file. To keep maintenance down, it's easier to have them
 *	  separated right now.
 *
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/blkdev.h>
#include <linux/poll.h>
#include <linux/cdev.h>
#include <linux/percpu.h>
#include <linux/uio.h>
#include <linux/bsg.h>

#include <scsi/scsi.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/sg.h>

#define BSG_DESCRIPTION	"Block layer SCSI generic (bsg) driver"
#define BSG_VERSION	"0.4"

struct bsg_device {
	request_queue_t *queue;
	spinlock_t lock;
	struct list_head busy_list;
	struct list_head done_list;
	struct hlist_node dev_list;
	atomic_t ref_count;
	int minor;
	int queued_cmds;
	int done_cmds;
	wait_queue_head_t wq_done;
	wait_queue_head_t wq_free;
	char name[BUS_ID_SIZE];
	int max_queue;
	unsigned long flags;
};

enum {
	BSG_F_BLOCK		= 1,
	BSG_F_WRITE_PERM	= 2,
};

#define BSG_DEFAULT_CMDS	64
#define BSG_MAX_DEVS		32768

#undef BSG_DEBUG

#ifdef BSG_DEBUG
#define dprintk(fmt, args...) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ##args)
#else
#define dprintk(fmt, args...)
#endif

static DEFINE_MUTEX(bsg_mutex);
static int bsg_device_nr, bsg_minor_idx;

#define BSG_LIST_ARRAY_SIZE	8
static struct hlist_head bsg_device_list[BSG_LIST_ARRAY_SIZE];

static struct class *bsg_class;
static LIST_HEAD(bsg_class_list);

static int bsg_major;

static struct kmem_cache *bsg_cmd_cachep;

/*
 * our internal command type
 */
struct bsg_command {
	struct bsg_device *bd;
	struct list_head list;
	struct request *rq;
	struct bio *bio;
	struct bio *bidi_bio;
	int err;
	struct sg_io_v4 hdr;
	struct sg_io_v4 __user *uhdr;
	char sense[SCSI_SENSE_BUFFERSIZE];
};

static void bsg_free_command(struct bsg_command *bc)
{
	struct bsg_device *bd = bc->bd;
	unsigned long flags;

	kmem_cache_free(bsg_cmd_cachep, bc);

	spin_lock_irqsave(&bd->lock, flags);
	bd->queued_cmds--;
	spin_unlock_irqrestore(&bd->lock, flags);

	wake_up(&bd->wq_free);
}

static struct bsg_command *bsg_alloc_command(struct bsg_device *bd)
{
	struct bsg_command *bc = ERR_PTR(-EINVAL);

	spin_lock_irq(&bd->lock);

	if (bd->queued_cmds >= bd->max_queue)
		goto out;

	bd->queued_cmds++;
	spin_unlock_irq(&bd->lock);

	bc = kmem_cache_zalloc(bsg_cmd_cachep, GFP_KERNEL);
	if (unlikely(!bc)) {
		spin_lock_irq(&bd->lock);
		bd->queued_cmds--;
		bc = ERR_PTR(-ENOMEM);
		goto out;
	}

	bc->bd = bd;
	INIT_LIST_HEAD(&bc->list);
	dprintk("%s: returning free cmd %p\n", bd->name, bc);
	return bc;
out:
	spin_unlock_irq(&bd->lock);
	return bc;
}

static inline struct hlist_head *bsg_dev_idx_hash(int index)
{
	return &bsg_device_list[index & (BSG_LIST_ARRAY_SIZE - 1)];
}

static int bsg_io_schedule(struct bsg_device *bd)
{
	DEFINE_WAIT(wait);
	int ret = 0;

	spin_lock_irq(&bd->lock);

	BUG_ON(bd->done_cmds > bd->queued_cmds);

	/*
	 * -ENOSPC or -ENODATA?  I'm going for -ENODATA, meaning "I have no
	 * work to do", even though we return -ENOSPC after this same test
	 * during bsg_write() -- there, it means our buffer can't have more
	 * bsg_commands added to it, thus has no space left.
	 */
	if (bd->done_cmds == bd->queued_cmds) {
		ret = -ENODATA;
		goto unlock;
	}

	if (!test_bit(BSG_F_BLOCK, &bd->flags)) {
		ret = -EAGAIN;
		goto unlock;
	}

	prepare_to_wait(&bd->wq_done, &wait, TASK_UNINTERRUPTIBLE);
	spin_unlock_irq(&bd->lock);
	io_schedule();
	finish_wait(&bd->wq_done, &wait);

	return ret;
unlock:
	spin_unlock_irq(&bd->lock);
	return ret;
}

static int blk_fill_sgv4_hdr_rq(request_queue_t *q, struct request *rq,
				struct sg_io_v4 *hdr, int has_write_perm)
{
	memset(rq->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */

	if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
			   hdr->request_len))
		return -EFAULT;

	if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
		if (blk_verify_command(rq->cmd, has_write_perm))
			return -EPERM;
	} else if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	/*
	 * fill in request structure
	 */
	rq->cmd_len = hdr->request_len;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;

	rq->timeout = (hdr->timeout * HZ) / 1000;
	if (!rq->timeout)
		rq->timeout = q->sg_timeout;
	if (!rq->timeout)
		rq->timeout = BLK_DEFAULT_SG_TIMEOUT;

	return 0;
}
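
/*
 * Worked example for the timeout conversion above (values illustrative):
 * hdr->timeout is in milliseconds, so with HZ=250 a 30000 ms timeout
 * becomes (30000 * 250) / 1000 = 7500 jiffies. A zero timeout falls back
 * to the queue's sg_timeout, then to BLK_DEFAULT_SG_TIMEOUT.
 */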

/*
 * Check if sg_io_v4 from user is allowed and valid
 */
static int
bsg_validate_sgv4_hdr(request_queue_t *q, struct sg_io_v4 *hdr, int *rw)
{
	int ret = 0;

	if (hdr->guard != 'Q')
		return -EINVAL;
	if (hdr->request_len > BLK_MAX_CDB)
		return -EINVAL;
	if (hdr->dout_xfer_len > (q->max_sectors << 9) ||
	    hdr->din_xfer_len > (q->max_sectors << 9))
		return -EIO;

	switch (hdr->protocol) {
	case BSG_PROTOCOL_SCSI:
		switch (hdr->subprotocol) {
		case BSG_SUB_PROTOCOL_SCSI_CMD:
		case BSG_SUB_PROTOCOL_SCSI_TRANSPORT:
			break;
		default:
			ret = -EINVAL;
		}
		break;
	default:
		ret = -EINVAL;
	}

	*rw = hdr->dout_xfer_len ? WRITE : READ;
	return ret;
}
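
/*
 * For reference, a minimal header that passes the checks above might be
 * built from userspace like this (a sketch; the CDB and timeout are
 * made-up values, and error handling is omitted):
 *
 *	struct sg_io_v4 hdr;
 *	unsigned char cdb[6] = { 0 };		// TEST UNIT READY
 *
 *	memset(&hdr, 0, sizeof(hdr));
 *	hdr.guard = 'Q';
 *	hdr.protocol = BSG_PROTOCOL_SCSI;
 *	hdr.subprotocol = BSG_SUB_PROTOCOL_SCSI_CMD;
 *	hdr.request = (unsigned long) cdb;
 *	hdr.request_len = sizeof(cdb);		// must be <= BLK_MAX_CDB
 *	hdr.timeout = 30000;			// milliseconds
 *
 * Leaving dout_xfer_len/din_xfer_len zero means no data is mapped.
 */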

/*
 * map sg_io_v4 to a request.
 */
static struct request *
bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr)
{
	request_queue_t *q = bd->queue;
	struct request *rq, *next_rq = NULL;
	int ret, rw;
	unsigned int dxfer_len;
	void *dxferp = NULL;

	dprintk("map hdr %llx/%u %llx/%u\n", (unsigned long long) hdr->dout_xferp,
		hdr->dout_xfer_len, (unsigned long long) hdr->din_xferp,
		hdr->din_xfer_len);

	ret = bsg_validate_sgv4_hdr(q, hdr, &rw);
	if (ret)
		return ERR_PTR(ret);

	/*
	 * map scatter-gather elements separately and string them to request
	 */
	rq = blk_get_request(q, rw, GFP_KERNEL);
	if (!rq)
		return ERR_PTR(-ENOMEM);
	ret = blk_fill_sgv4_hdr_rq(q, rq, hdr, test_bit(BSG_F_WRITE_PERM,
						       &bd->flags));
	if (ret)
		goto out;

	if (rw == WRITE && hdr->din_xfer_len) {
		if (!test_bit(QUEUE_FLAG_BIDI, &q->queue_flags)) {
			ret = -EOPNOTSUPP;
			goto out;
		}

		next_rq = blk_get_request(q, READ, GFP_KERNEL);
		if (!next_rq) {
			ret = -ENOMEM;
			goto out;
		}
		rq->next_rq = next_rq;

		dxferp = (void *)(unsigned long)hdr->din_xferp;
		ret = blk_rq_map_user(q, next_rq, dxferp, hdr->din_xfer_len);
		if (ret)
			goto out;
	}

	if (hdr->dout_xfer_len) {
		dxfer_len = hdr->dout_xfer_len;
		dxferp = (void *)(unsigned long)hdr->dout_xferp;
	} else if (hdr->din_xfer_len) {
		dxfer_len = hdr->din_xfer_len;
		dxferp = (void *)(unsigned long)hdr->din_xferp;
	} else
		dxfer_len = 0;

	if (dxfer_len) {
		ret = blk_rq_map_user(q, rq, dxferp, dxfer_len);
		if (ret)
			goto out;
	}
	return rq;
out:
	blk_put_request(rq);
	if (next_rq) {
		blk_rq_unmap_user(next_rq->bio);
		blk_put_request(next_rq);
	}
	return ERR_PTR(ret);
}
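
/*
 * Note on bidirectional commands: when a header carries both
 * dout_xfer_len and din_xfer_len, the dout side is mapped onto 'rq' and
 * the din side onto a second request linked via rq->next_rq, which only
 * works on queues with QUEUE_FLAG_BIDI set. In the sg_io_v4 terms used
 * above (buffer names are illustrative):
 *
 *	hdr.dout_xferp = (unsigned long) out_buf;
 *	hdr.dout_xfer_len = out_len;
 *	hdr.din_xferp = (unsigned long) in_buf;
 *	hdr.din_xfer_len = in_len;
 */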

/*
 * async completion call-back from the block layer, when scsi/ide/whatever
 * calls end_that_request_last() on a request
 */
static void bsg_rq_end_io(struct request *rq, int uptodate)
{
	struct bsg_command *bc = rq->end_io_data;
	struct bsg_device *bd = bc->bd;
	unsigned long flags;

	dprintk("%s: finished rq %p bc %p, bio %p stat %d\n",
		bd->name, rq, bc, bc->bio, uptodate);

	bc->hdr.duration = jiffies_to_msecs(jiffies - bc->hdr.duration);

	spin_lock_irqsave(&bd->lock, flags);
	list_move_tail(&bc->list, &bd->done_list);
	bd->done_cmds++;
	spin_unlock_irqrestore(&bd->lock, flags);

	wake_up(&bd->wq_done);
}

/*
 * do final setup of a 'bc' and submit the matching 'rq' to the block
 * layer for io
 */
static void bsg_add_command(struct bsg_device *bd, request_queue_t *q,
			    struct bsg_command *bc, struct request *rq)
{
	rq->sense = bc->sense;
	rq->sense_len = 0;

	/*
	 * add bc command to busy queue and submit rq for io
	 */
	bc->rq = rq;
	bc->bio = rq->bio;
	if (rq->next_rq)
		bc->bidi_bio = rq->next_rq->bio;
	bc->hdr.duration = jiffies;
	spin_lock_irq(&bd->lock);
	list_add_tail(&bc->list, &bd->busy_list);
	spin_unlock_irq(&bd->lock);

	dprintk("%s: queueing rq %p, bc %p\n", bd->name, rq, bc);

	rq->end_io_data = bc;
	blk_execute_rq_nowait(q, NULL, rq, 1, bsg_rq_end_io);
}

static struct bsg_command *bsg_next_done_cmd(struct bsg_device *bd)
{
	struct bsg_command *bc = NULL;

	spin_lock_irq(&bd->lock);
	if (bd->done_cmds) {
		bc = list_entry(bd->done_list.next, struct bsg_command, list);
		list_del(&bc->list);
		bd->done_cmds--;
	}
	spin_unlock_irq(&bd->lock);

	return bc;
}

/*
 * Get a finished command from the done list
 */
static struct bsg_command *bsg_get_done_cmd(struct bsg_device *bd)
{
	struct bsg_command *bc;
	int ret;

	do {
		bc = bsg_next_done_cmd(bd);
		if (bc)
			break;

		if (!test_bit(BSG_F_BLOCK, &bd->flags)) {
			bc = ERR_PTR(-EAGAIN);
			break;
		}

		ret = wait_event_interruptible(bd->wq_done, bd->done_cmds);
		if (ret) {
			bc = ERR_PTR(-ERESTARTSYS);
			break;
		}
	} while (1);

	dprintk("%s: returning done %p\n", bd->name, bc);

	return bc;
}

static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
				    struct bio *bio, struct bio *bidi_bio)
{
	int ret = 0;

	dprintk("rq %p bio %p %u\n", rq, bio, rq->errors);
	/*
	 * fill in all the output members
	 */
	hdr->device_status = status_byte(rq->errors);
	hdr->transport_status = host_byte(rq->errors);
	hdr->driver_status = driver_byte(rq->errors);
	hdr->info = 0;
	if (hdr->device_status || hdr->transport_status || hdr->driver_status)
		hdr->info |= SG_INFO_CHECK;
	hdr->din_resid = rq->data_len;
	hdr->response_len = 0;

	if (rq->sense_len && hdr->response) {
		int len = min_t(unsigned int, hdr->max_response_len,
					rq->sense_len);

		ret = copy_to_user((void *)(unsigned long)hdr->response,
				   rq->sense, len);
		if (!ret)
			hdr->response_len = len;
		else
			ret = -EFAULT;
	}

	if (rq->next_rq) {
		blk_rq_unmap_user(bidi_bio);
		blk_put_request(rq->next_rq);
	}

	blk_rq_unmap_user(bio);
	blk_put_request(rq);

	return ret;
}

static int bsg_complete_all_commands(struct bsg_device *bd)
{
	struct bsg_command *bc;
	int ret, tret;

	dprintk("%s: entered\n", bd->name);

	set_bit(BSG_F_BLOCK, &bd->flags);

	/*
	 * wait for all commands to complete
	 */
	ret = 0;
	do {
		ret = bsg_io_schedule(bd);
		/*
		 * look for -ENODATA specifically -- we'll sometimes get
		 * -ERESTARTSYS when we've taken a signal, but we can't
		 * return until we're done freeing the queue, so ignore
		 * it.  The signal will get handled when we're done freeing
		 * the bsg_device.
		 */
	} while (ret != -ENODATA);

	/*
	 * discard done commands
	 */
	ret = 0;
	do {
		spin_lock_irq(&bd->lock);
		if (!bd->queued_cmds) {
			spin_unlock_irq(&bd->lock);
			break;
		}
		spin_unlock_irq(&bd->lock);

		bc = bsg_get_done_cmd(bd);
		if (IS_ERR(bc))
			break;

		tret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
						bc->bidi_bio);
		if (!ret)
			ret = tret;

		bsg_free_command(bc);
	} while (1);

	return ret;
}

static int
__bsg_read(char __user *buf, size_t count, struct bsg_device *bd,
	   const struct iovec *iov, ssize_t *bytes_read)
{
	struct bsg_command *bc;
	int nr_commands, ret;

	if (count % sizeof(struct sg_io_v4))
		return -EINVAL;

	ret = 0;
	nr_commands = count / sizeof(struct sg_io_v4);
	while (nr_commands) {
		bc = bsg_get_done_cmd(bd);
		if (IS_ERR(bc)) {
			ret = PTR_ERR(bc);
			break;
		}

		/*
		 * this is the only case where we need to copy data back
		 * after completing the request.  so do that here,
		 * bsg_complete_work() cannot do that for us
		 */
		ret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
					       bc->bidi_bio);

		if (copy_to_user(buf, &bc->hdr, sizeof(bc->hdr)))
			ret = -EFAULT;

		bsg_free_command(bc);

		if (ret)
			break;

		buf += sizeof(struct sg_io_v4);
		*bytes_read += sizeof(struct sg_io_v4);
		nr_commands--;
	}

	return ret;
}

static inline void bsg_set_block(struct bsg_device *bd, struct file *file)
{
	if (file->f_flags & O_NONBLOCK)
		clear_bit(BSG_F_BLOCK, &bd->flags);
	else
		set_bit(BSG_F_BLOCK, &bd->flags);
}

static inline void bsg_set_write_perm(struct bsg_device *bd, struct file *file)
{
	if (file->f_mode & FMODE_WRITE)
		set_bit(BSG_F_WRITE_PERM, &bd->flags);
	else
		clear_bit(BSG_F_WRITE_PERM, &bd->flags);
}

/*
 * Check if the error is a "real" error that we should return.
 */
static inline int err_block_err(int ret)
{
	if (ret && ret != -ENOSPC && ret != -ENODATA && ret != -EAGAIN)
		return 1;

	return 0;
}

static ssize_t
bsg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	struct bsg_device *bd = file->private_data;
	int ret;
	ssize_t bytes_read;

	dprintk("%s: read %Zd bytes\n", bd->name, count);

	bsg_set_block(bd, file);
	bytes_read = 0;
	ret = __bsg_read(buf, count, bd, NULL, &bytes_read);
	*ppos = bytes_read;

	if (!bytes_read || (bytes_read && err_block_err(ret)))
		bytes_read = ret;

	return bytes_read;
}

static int __bsg_write(struct bsg_device *bd, const char __user *buf,
		       size_t count, ssize_t *bytes_written)
{
	struct bsg_command *bc;
	struct request *rq;
	int ret, nr_commands;

	if (count % sizeof(struct sg_io_v4))
		return -EINVAL;

	nr_commands = count / sizeof(struct sg_io_v4);
	rq = NULL;
	bc = NULL;
	ret = 0;
	while (nr_commands) {
		request_queue_t *q = bd->queue;

		bc = bsg_alloc_command(bd);
		if (IS_ERR(bc)) {
			ret = PTR_ERR(bc);
			bc = NULL;
			break;
		}

		bc->uhdr = (struct sg_io_v4 __user *) buf;
		if (copy_from_user(&bc->hdr, buf, sizeof(bc->hdr))) {
			ret = -EFAULT;
			break;
		}

		/*
		 * get a request, fill in the blanks, and add to request queue
		 */
		rq = bsg_map_hdr(bd, &bc->hdr);
		if (IS_ERR(rq)) {
			ret = PTR_ERR(rq);
			rq = NULL;
			break;
		}

		bsg_add_command(bd, q, bc, rq);
		bc = NULL;
		rq = NULL;
		nr_commands--;
		buf += sizeof(struct sg_io_v4);
		*bytes_written += sizeof(struct sg_io_v4);
	}

	if (bc)
		bsg_free_command(bc);

	return ret;
}

static ssize_t
bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
	struct bsg_device *bd = file->private_data;
	ssize_t bytes_written;
	int ret;

	dprintk("%s: write %Zd bytes\n", bd->name, count);

	bsg_set_block(bd, file);
	bsg_set_write_perm(bd, file);

	bytes_written = 0;
	ret = __bsg_write(bd, buf, count, &bytes_written);
	*ppos = bytes_written;

	/*
	 * return bytes written on non-fatal errors
	 */
	if (!bytes_written || (bytes_written && err_block_err(ret)))
		bytes_written = ret;

	dprintk("%s: returning %Zd\n", bd->name, bytes_written);
	return bytes_written;
}
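
/*
 * Taken together, write() and read() give an asynchronous queueing
 * interface: userspace writes one or more sg_io_v4 headers to submit
 * commands, then reads back one header per completed command. A rough
 * sketch of that flow (the device path is an assumption; error handling
 * is omitted):
 *
 *	int fd = open("/dev/bsg/0:0:0:0", O_RDWR);
 *	struct sg_io_v4 hdr = ...;		// filled in as shown earlier
 *
 *	write(fd, &hdr, sizeof(hdr));		// queue the command
 *	read(fd, &hdr, sizeof(hdr));		// reap a completion
 *
 * With O_NONBLOCK set, read() fails with -EAGAIN while nothing has
 * completed, and poll() can be used to wait instead.
 */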

static struct bsg_device *bsg_alloc_device(void)
{
	struct bsg_device *bd;

	bd = kzalloc(sizeof(struct bsg_device), GFP_KERNEL);
	if (unlikely(!bd))
		return NULL;

	spin_lock_init(&bd->lock);

	bd->max_queue = BSG_DEFAULT_CMDS;

	INIT_LIST_HEAD(&bd->busy_list);
	INIT_LIST_HEAD(&bd->done_list);
	INIT_HLIST_NODE(&bd->dev_list);

	init_waitqueue_head(&bd->wq_free);
	init_waitqueue_head(&bd->wq_done);
	return bd;
}

static int bsg_put_device(struct bsg_device *bd)
{
	int ret = 0;

	mutex_lock(&bsg_mutex);

	if (!atomic_dec_and_test(&bd->ref_count))
		goto out;

	dprintk("%s: tearing down\n", bd->name);

	/*
	 * close can always block
	 */
	set_bit(BSG_F_BLOCK, &bd->flags);

	/*
	 * correct error detection baddies here again.  it's the responsibility
	 * of the app to properly reap commands before close() if it wants
	 * fool-proof error detection
	 */
	ret = bsg_complete_all_commands(bd);

	blk_put_queue(bd->queue);
	hlist_del(&bd->dev_list);
	kfree(bd);
out:
	mutex_unlock(&bsg_mutex);
	return ret;
}

static struct bsg_device *bsg_add_device(struct inode *inode,
					 struct request_queue *rq,
					 struct file *file)
{
	struct bsg_device *bd;
#ifdef BSG_DEBUG
	unsigned char buf[32];
#endif

	bd = bsg_alloc_device();
	if (!bd)
		return ERR_PTR(-ENOMEM);

	bd->queue = rq;
	kobject_get(&rq->kobj);
	bsg_set_block(bd, file);

	atomic_set(&bd->ref_count, 1);
	bd->minor = iminor(inode);
	mutex_lock(&bsg_mutex);
	hlist_add_head(&bd->dev_list, bsg_dev_idx_hash(bd->minor));

	strncpy(bd->name, rq->bsg_dev.class_dev->class_id, sizeof(bd->name) - 1);
	dprintk("bound to <%s>, max queue %d\n",
		format_dev_t(buf, inode->i_rdev), bd->max_queue);

	mutex_unlock(&bsg_mutex);
	return bd;
}

static struct bsg_device *__bsg_get_device(int minor)
{
	struct bsg_device *bd = NULL;
	struct hlist_node *entry;

	mutex_lock(&bsg_mutex);

	hlist_for_each(entry, bsg_dev_idx_hash(minor)) {
		bd = hlist_entry(entry, struct bsg_device, dev_list);
		if (bd->minor == minor) {
			atomic_inc(&bd->ref_count);
			break;
		}

		bd = NULL;
	}

	mutex_unlock(&bsg_mutex);
	return bd;
}

static struct bsg_device *bsg_get_device(struct inode *inode, struct file *file)
{
	struct bsg_device *bd = __bsg_get_device(iminor(inode));
	struct bsg_class_device *bcd, *__bcd;

	if (bd)
		return bd;

	/*
	 * find the class device
	 */
	bcd = NULL;
	mutex_lock(&bsg_mutex);
	list_for_each_entry(__bcd, &bsg_class_list, list) {
		if (__bcd->minor == iminor(inode)) {
			bcd = __bcd;
			break;
		}
	}
	mutex_unlock(&bsg_mutex);

	if (!bcd)
		return ERR_PTR(-ENODEV);

	return bsg_add_device(inode, bcd->queue, file);
}

static int bsg_open(struct inode *inode, struct file *file)
{
	struct bsg_device *bd = bsg_get_device(inode, file);

	if (IS_ERR(bd))
		return PTR_ERR(bd);

	file->private_data = bd;
	return 0;
}

static int bsg_release(struct inode *inode, struct file *file)
{
	struct bsg_device *bd = file->private_data;

	file->private_data = NULL;
	return bsg_put_device(bd);
}

static unsigned int bsg_poll(struct file *file, poll_table *wait)
{
	struct bsg_device *bd = file->private_data;
	unsigned int mask = 0;

	poll_wait(file, &bd->wq_done, wait);
	poll_wait(file, &bd->wq_free, wait);

	spin_lock_irq(&bd->lock);
	if (!list_empty(&bd->done_list))
		mask |= POLLIN | POLLRDNORM;
	if (bd->queued_cmds < bd->max_queue)
		mask |= POLLOUT;
	spin_unlock_irq(&bd->lock);

	return mask;
}
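
/*
 * A poll-driven reap loop might look like this from userspace (a sketch;
 * assumes the fd from the earlier examples, opened with O_NONBLOCK):
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *	while (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		if (read(fd, &hdr, sizeof(hdr)) != sizeof(hdr))
 *			break;
 *
 * POLLIN reports completed commands waiting on the done list; POLLOUT
 * reports room to queue further commands.
 */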

static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct bsg_device *bd = file->private_data;
	int __user *uarg = (int __user *) arg;

	switch (cmd) {
		/*
		 * our own ioctls
		 */
	case SG_GET_COMMAND_Q:
		return put_user(bd->max_queue, uarg);
	case SG_SET_COMMAND_Q: {
		int queue;

		if (get_user(queue, uarg))
			return -EFAULT;
		if (queue < 1)
			return -EINVAL;

		spin_lock_irq(&bd->lock);
		bd->max_queue = queue;
		spin_unlock_irq(&bd->lock);
		return 0;
	}

	/*
	 * SCSI/sg ioctls
	 */
	case SG_GET_VERSION_NUM:
	case SCSI_IOCTL_GET_IDLUN:
	case SCSI_IOCTL_GET_BUS_NUMBER:
	case SG_SET_TIMEOUT:
	case SG_GET_TIMEOUT:
	case SG_GET_RESERVED_SIZE:
	case SG_SET_RESERVED_SIZE:
	case SG_EMULATED_HOST:
	case SCSI_IOCTL_SEND_COMMAND: {
		void __user *uarg = (void __user *) arg;
		return scsi_cmd_ioctl(file, bd->queue, NULL, cmd, uarg);
	}
	case SG_IO: {
		struct request *rq;
		struct bio *bio, *bidi_bio = NULL;
		struct sg_io_v4 hdr;

		if (copy_from_user(&hdr, uarg, sizeof(hdr)))
			return -EFAULT;

		rq = bsg_map_hdr(bd, &hdr);
		if (IS_ERR(rq))
			return PTR_ERR(rq);

		bio = rq->bio;
		if (rq->next_rq)
			bidi_bio = rq->next_rq->bio;
		blk_execute_rq(bd->queue, NULL, rq, 0);
		blk_complete_sgv4_hdr_rq(rq, &hdr, bio, bidi_bio);

		if (copy_to_user(uarg, &hdr, sizeof(hdr)))
			return -EFAULT;

		return 0;
	}
	/*
	 * block device ioctls
	 */
	default:
#if 0
		return ioctl_by_bdev(bd->bdev, cmd, arg);
#else
		return -ENOTTY;
#endif
	}
}
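
/*
 * SG_IO is the synchronous path through the ioctl above: the caller
 * blocks in blk_execute_rq() until the command completes. A userspace
 * sketch (same hypothetical fd as before, error handling omitted):
 *
 *	struct sg_io_v4 hdr = ...;	// filled in as shown earlier
 *
 *	if (ioctl(fd, SG_IO, &hdr) == 0 && !(hdr.info & SG_INFO_CHECK))
 *		;	// completed without device/transport/driver error
 *
 * The completed header carries device_status, transport_status,
 * driver_status, din_resid and, if requested, sense data.
 */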

static struct file_operations bsg_fops = {
	.read		=	bsg_read,
	.write		=	bsg_write,
	.poll		=	bsg_poll,
	.open		=	bsg_open,
	.release	=	bsg_release,
	.unlocked_ioctl	=	bsg_ioctl,
	.owner		=	THIS_MODULE,
};

void bsg_unregister_queue(struct request_queue *q)
{
	struct bsg_class_device *bcd = &q->bsg_dev;

	WARN_ON(!bcd->class_dev);

	mutex_lock(&bsg_mutex);
	sysfs_remove_link(&q->kobj, "bsg");
	class_device_unregister(bcd->class_dev);
	put_device(bcd->dev);
	bcd->class_dev = NULL;
	bcd->dev = NULL;
	list_del_init(&bcd->list);
	bsg_device_nr--;
	mutex_unlock(&bsg_mutex);
}
EXPORT_SYMBOL_GPL(bsg_unregister_queue);

int bsg_register_queue(struct request_queue *q, struct device *gdev,
		       const char *name)
{
	struct bsg_class_device *bcd, *__bcd;
	dev_t dev;
	int ret = -EMFILE;
	struct class_device *class_dev = NULL;
	const char *devname;

	if (name)
		devname = name;
	else
		devname = gdev->bus_id;

	/*
	 * we need a proper transport to send commands, not a stacked device
	 */
	if (!q->request_fn)
		return 0;

	bcd = &q->bsg_dev;
	memset(bcd, 0, sizeof(*bcd));
	INIT_LIST_HEAD(&bcd->list);

	mutex_lock(&bsg_mutex);
	if (bsg_device_nr == BSG_MAX_DEVS) {
		printk(KERN_ERR "bsg: too many bsg devices\n");
		goto err;
	}

retry:
	list_for_each_entry(__bcd, &bsg_class_list, list) {
		if (__bcd->minor == bsg_minor_idx) {
			bsg_minor_idx++;
			if (bsg_minor_idx == BSG_MAX_DEVS)
				bsg_minor_idx = 0;
			goto retry;
		}
	}

	bcd->minor = bsg_minor_idx++;
	if (bsg_minor_idx == BSG_MAX_DEVS)
		bsg_minor_idx = 0;

	bcd->queue = q;
	bcd->dev = get_device(gdev);
	dev = MKDEV(bsg_major, bcd->minor);
	class_dev = class_device_create(bsg_class, NULL, dev, gdev, "%s",
					devname);
	if (IS_ERR(class_dev)) {
		ret = PTR_ERR(class_dev);
		goto err_put;
	}
	bcd->class_dev = class_dev;

	if (q->kobj.sd) {
		ret = sysfs_create_link(&q->kobj, &bcd->class_dev->kobj, "bsg");
		if (ret)
			goto err_unregister;
	}

	list_add_tail(&bcd->list, &bsg_class_list);
	bsg_device_nr++;

	mutex_unlock(&bsg_mutex);
	return 0;

err_unregister:
	class_device_unregister(class_dev);
err_put:
	put_device(gdev);
err:
	mutex_unlock(&bsg_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(bsg_register_queue);
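
/*
 * A driver-side usage sketch (hedged: the exact call site varies by
 * subsystem; in this era the SCSI midlayer registers each device's
 * request queue roughly like this from its sysfs add path):
 *
 *	ret = bsg_register_queue(sdev->request_queue,
 *				 &sdev->sdev_gendev, NULL);
 *
 * Passing a NULL name makes the bsg class device inherit gdev->bus_id,
 * e.g. "0:0:0:0" for a SCSI device.
 */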

static struct cdev bsg_cdev = {
	.kobj   = {.name = "bsg", },
	.owner  = THIS_MODULE,
};

static int __init bsg_init(void)
{
	int ret, i;
	dev_t devid;

	bsg_cmd_cachep = kmem_cache_create("bsg_cmd",
				sizeof(struct bsg_command), 0, 0, NULL, NULL);
	if (!bsg_cmd_cachep) {
		printk(KERN_ERR "bsg: failed creating slab cache\n");
		return -ENOMEM;
	}

	for (i = 0; i < BSG_LIST_ARRAY_SIZE; i++)
		INIT_HLIST_HEAD(&bsg_device_list[i]);

	bsg_class = class_create(THIS_MODULE, "bsg");
	if (IS_ERR(bsg_class)) {
		ret = PTR_ERR(bsg_class);
		goto destroy_kmemcache;
	}

	ret = alloc_chrdev_region(&devid, 0, BSG_MAX_DEVS, "bsg");
	if (ret)
		goto destroy_bsg_class;

	bsg_major = MAJOR(devid);

	cdev_init(&bsg_cdev, &bsg_fops);
	ret = cdev_add(&bsg_cdev, MKDEV(bsg_major, 0), BSG_MAX_DEVS);
	if (ret)
		goto unregister_chrdev;

	printk(KERN_INFO BSG_DESCRIPTION " version " BSG_VERSION
	       " loaded (major %d)\n", bsg_major);
	return 0;
unregister_chrdev:
	unregister_chrdev_region(MKDEV(bsg_major, 0), BSG_MAX_DEVS);
destroy_bsg_class:
	class_destroy(bsg_class);
destroy_kmemcache:
	kmem_cache_destroy(bsg_cmd_cachep);
	return ret;
}

MODULE_AUTHOR("Jens Axboe");
MODULE_DESCRIPTION(BSG_DESCRIPTION);
MODULE_LICENSE("GPL");

device_initcall(bsg_init);