/*
 * bsg.c - block layer implementation of the sg v4 interface
 *
 * Copyright (C) 2004 Jens Axboe <axboe@suse.de> SUSE Labs
 * Copyright (C) 2004 Peter M. Jones <pjones@redhat.com>
 *
 *  This file is subject to the terms and conditions of the GNU General Public
 *  License version 2.  See the file "COPYING" in the main directory of this
 *  archive for more details.
 *
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/blkdev.h>
#include <linux/poll.h>
#include <linux/cdev.h>
#include <linux/percpu.h>
#include <linux/uio.h>
#include <linux/idr.h>
#include <linux/bsg.h>
#include <linux/smp_lock.h>

#include <scsi/scsi.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/sg.h>

#define BSG_DESCRIPTION	"Block layer SCSI generic (bsg) driver"
#define BSG_VERSION	"0.4"
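
/*
 * bsg_device is the per-queue state behind an open bsg node: it tracks
 * in-flight (busy) and completed (done) commands and the waitqueues used
 * to throttle submitters and wake readers.
 */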
struct bsg_device {
	struct request_queue *queue;
	spinlock_t lock;
	struct list_head busy_list;
	struct list_head done_list;
	struct hlist_node dev_list;
	atomic_t ref_count;
	int queued_cmds;
	int done_cmds;
	wait_queue_head_t wq_done;
	wait_queue_head_t wq_free;
	char name[20];
	int max_queue;
	unsigned long flags;
};

enum {
	BSG_F_BLOCK	= 1,
};

#define BSG_DEFAULT_CMDS	64
#define BSG_MAX_DEVS		32768

#undef BSG_DEBUG
#ifdef BSG_DEBUG
#define dprintk(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ##args)
#else
#define dprintk(fmt, args...)
#endif

static DEFINE_MUTEX(bsg_mutex);
static DEFINE_IDR(bsg_minor_idr);

#define BSG_LIST_ARRAY_SIZE	8
static struct hlist_head bsg_device_list[BSG_LIST_ARRAY_SIZE];

static struct class *bsg_class;
static int bsg_major;

static struct kmem_cache *bsg_cmd_cachep;

/*
 * our internal command type
 */
struct bsg_command {
	struct bsg_device *bd;
	struct list_head list;
	struct request *rq;
	struct bio *bio;
	struct bio *bidi_bio;
	int err;
	struct sg_io_v4 hdr;
	char sense[SCSI_SENSE_BUFFERSIZE];
};
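
/*
 * Return a command to the slab cache and drop the device's queued count;
 * wakes anyone waiting in bsg_alloc_command() for a free slot.
 */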
static void bsg_free_command(struct bsg_command *bc)
{
	struct bsg_device *bd = bc->bd;
	unsigned long flags;

	kmem_cache_free(bsg_cmd_cachep, bc);

	spin_lock_irqsave(&bd->lock, flags);
	bd->queued_cmds--;
	spin_unlock_irqrestore(&bd->lock, flags);

	wake_up(&bd->wq_free);
}
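
/*
 * Allocate a zeroed command, returning -EINVAL if the device already has
 * max_queue commands outstanding and -ENOMEM if the slab allocation fails.
 */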
static struct bsg_command *bsg_alloc_command(struct bsg_device *bd)
{
	struct bsg_command *bc = ERR_PTR(-EINVAL);

	spin_lock_irq(&bd->lock);

	if (bd->queued_cmds >= bd->max_queue)
		goto out;

	bd->queued_cmds++;
	spin_unlock_irq(&bd->lock);

	bc = kmem_cache_zalloc(bsg_cmd_cachep, GFP_KERNEL);
	if (unlikely(!bc)) {
		spin_lock_irq(&bd->lock);
		bd->queued_cmds--;
		bc = ERR_PTR(-ENOMEM);
		goto out;
	}

	bc->bd = bd;
	INIT_LIST_HEAD(&bc->list);
	dprintk("%s: returning free cmd %p\n", bd->name, bc);
	return bc;
out:
	spin_unlock_irq(&bd->lock);
	return bc;
}

static inline struct hlist_head *bsg_dev_idx_hash(int index)
{
	return &bsg_device_list[index & (BSG_LIST_ARRAY_SIZE - 1)];
}
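
/*
 * Sleep until a queued command completes.  Returns -ENODATA when nothing
 * is outstanding and -EAGAIN for non-blocking opens.
 */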
static int bsg_io_schedule(struct bsg_device *bd)
{
	DEFINE_WAIT(wait);
	int ret = 0;

	spin_lock_irq(&bd->lock);

	BUG_ON(bd->done_cmds > bd->queued_cmds);

	/*
	 * -ENOSPC or -ENODATA?  I'm going for -ENODATA, meaning "I have no
	 * work to do", even though we return -ENOSPC after this same test
	 * during bsg_write() -- there, it means our buffer can't have more
	 * bsg_commands added to it, thus has no space left.
	 */
	if (bd->done_cmds == bd->queued_cmds) {
		ret = -ENODATA;
		goto unlock;
	}

	if (!test_bit(BSG_F_BLOCK, &bd->flags)) {
		ret = -EAGAIN;
		goto unlock;
	}

	prepare_to_wait(&bd->wq_done, &wait, TASK_UNINTERRUPTIBLE);
	spin_unlock_irq(&bd->lock);
	io_schedule();
	finish_wait(&bd->wq_done, &wait);

	return ret;
unlock:
	spin_unlock_irq(&bd->lock);
	return ret;
}
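
/*
 * Copy the CDB from user space and fill in the request (command length,
 * type, timeout) from the sg_io_v4 header.  Plain SCSI commands are
 * checked against the caller's write permission; transport-specific
 * requests require CAP_SYS_RAWIO.
 */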
static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
				struct sg_io_v4 *hdr, struct bsg_device *bd,
				fmode_t has_write_perm)
{
	if (hdr->request_len > BLK_MAX_CDB) {
		rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
		if (!rq->cmd)
			return -ENOMEM;
	}

	if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
			   hdr->request_len))
		return -EFAULT;

	if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
		if (blk_verify_command(rq->cmd, has_write_perm))
			return -EPERM;
	} else if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	/*
	 * fill in request structure
	 */
	rq->cmd_len = hdr->request_len;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;

	rq->timeout = (hdr->timeout * HZ) / 1000;
	if (!rq->timeout)
		rq->timeout = q->sg_timeout;
	if (!rq->timeout)
		rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
	if (rq->timeout < BLK_MIN_SG_TIMEOUT)
		rq->timeout = BLK_MIN_SG_TIMEOUT;

	return 0;
}

/*
 * Check if sg_io_v4 from user is allowed and valid
 */
static int
bsg_validate_sgv4_hdr(struct request_queue *q, struct sg_io_v4 *hdr, int *rw)
{
	int ret = 0;

	if (hdr->guard != 'Q')
		return -EINVAL;

	switch (hdr->protocol) {
	case BSG_PROTOCOL_SCSI:
		switch (hdr->subprotocol) {
		case BSG_SUB_PROTOCOL_SCSI_CMD:
		case BSG_SUB_PROTOCOL_SCSI_TRANSPORT:
			break;
		default:
			ret = -EINVAL;
		}
		break;
	default:
		ret = -EINVAL;
	}

	*rw = hdr->dout_xfer_len ? WRITE : READ;
	return ret;
}

/*
 * map sg_io_v4 to a request.
 */
static struct request *
bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
	    u8 *sense)
{
	struct request_queue *q = bd->queue;
	struct request *rq, *next_rq = NULL;
	int ret, rw;
	unsigned int dxfer_len;
	void *dxferp = NULL;

	dprintk("map hdr %llx/%u %llx/%u\n", (unsigned long long) hdr->dout_xferp,
		hdr->dout_xfer_len, (unsigned long long) hdr->din_xferp,
		hdr->din_xfer_len);

	ret = bsg_validate_sgv4_hdr(q, hdr, &rw);
	if (ret)
		return ERR_PTR(ret);

	/*
	 * map scatter-gather elements separately and string them to request
	 */
	rq = blk_get_request(q, rw, GFP_KERNEL);
	if (!rq)
		return ERR_PTR(-ENOMEM);
	ret = blk_fill_sgv4_hdr_rq(q, rq, hdr, bd, has_write_perm);
	if (ret)
		goto out;

	if (rw == WRITE && hdr->din_xfer_len) {
		if (!test_bit(QUEUE_FLAG_BIDI, &q->queue_flags)) {
			ret = -EOPNOTSUPP;
			goto out;
		}

		next_rq = blk_get_request(q, READ, GFP_KERNEL);
		if (!next_rq) {
			ret = -ENOMEM;
			goto out;
		}
		rq->next_rq = next_rq;
		next_rq->cmd_type = rq->cmd_type;

		dxferp = (void *)(unsigned long)hdr->din_xferp;
		ret = blk_rq_map_user(q, next_rq, NULL, dxferp,
				      hdr->din_xfer_len, GFP_KERNEL);
		if (ret)
			goto out;
	}

	if (hdr->dout_xfer_len) {
		dxfer_len = hdr->dout_xfer_len;
		dxferp = (void *)(unsigned long)hdr->dout_xferp;
	} else if (hdr->din_xfer_len) {
		dxfer_len = hdr->din_xfer_len;
		dxferp = (void *)(unsigned long)hdr->din_xferp;
	} else
		dxfer_len = 0;

	if (dxfer_len) {
		ret = blk_rq_map_user(q, rq, NULL, dxferp, dxfer_len,
				      GFP_KERNEL);
		if (ret)
			goto out;
	}

	rq->sense = sense;
	rq->sense_len = 0;

	return rq;
out:
	if (rq->cmd != rq->__cmd)
		kfree(rq->cmd);
	blk_put_request(rq);
	if (next_rq) {
		blk_rq_unmap_user(next_rq->bio);
		blk_put_request(next_rq);
	}
	return ERR_PTR(ret);
}

/*
 * async completion call-back from the block layer, when scsi/ide/whatever
 * calls end_that_request_last() on a request
 */
static void bsg_rq_end_io(struct request *rq, int uptodate)
{
	struct bsg_command *bc = rq->end_io_data;
	struct bsg_device *bd = bc->bd;
	unsigned long flags;

	dprintk("%s: finished rq %p bc %p, bio %p stat %d\n",
		bd->name, rq, bc, bc->bio, uptodate);

	bc->hdr.duration = jiffies_to_msecs(jiffies - bc->hdr.duration);

	spin_lock_irqsave(&bd->lock, flags);
	list_move_tail(&bc->list, &bd->done_list);
	bd->done_cmds++;
	spin_unlock_irqrestore(&bd->lock, flags);

	wake_up(&bd->wq_done);
}

/*
 * do final setup of a 'bc' and submit the matching 'rq' to the block
 * layer for io
 */
static void bsg_add_command(struct bsg_device *bd, struct request_queue *q,
			    struct bsg_command *bc, struct request *rq)
{
	int at_head = (0 == (bc->hdr.flags & BSG_FLAG_Q_AT_TAIL));

	/*
	 * add bc command to busy queue and submit rq for io
	 */
	bc->rq = rq;
	bc->bio = rq->bio;
	if (rq->next_rq)
		bc->bidi_bio = rq->next_rq->bio;
	bc->hdr.duration = jiffies;
	spin_lock_irq(&bd->lock);
	list_add_tail(&bc->list, &bd->busy_list);
	spin_unlock_irq(&bd->lock);

	dprintk("%s: queueing rq %p, bc %p\n", bd->name, rq, bc);

	rq->end_io_data = bc;
	blk_execute_rq_nowait(q, NULL, rq, at_head, bsg_rq_end_io);
}
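
/*
 * Pop the oldest completed command off the done list, or NULL if empty.
 */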
static struct bsg_command *bsg_next_done_cmd(struct bsg_device *bd)
{
	struct bsg_command *bc = NULL;

	spin_lock_irq(&bd->lock);
	if (bd->done_cmds) {
		bc = list_first_entry(&bd->done_list, struct bsg_command, list);
		list_del(&bc->list);
		bd->done_cmds--;
	}
	spin_unlock_irq(&bd->lock);

	return bc;
}

/*
 * Get a finished command from the done list
 */
static struct bsg_command *bsg_get_done_cmd(struct bsg_device *bd)
{
	struct bsg_command *bc;
	int ret;

	do {
		bc = bsg_next_done_cmd(bd);
		if (bc)
			break;

		if (!test_bit(BSG_F_BLOCK, &bd->flags)) {
			bc = ERR_PTR(-EAGAIN);
			break;
		}

		ret = wait_event_interruptible(bd->wq_done, bd->done_cmds);
		if (ret) {
			bc = ERR_PTR(-ERESTARTSYS);
			break;
		}
	} while (1);

	dprintk("%s: returning done %p\n", bd->name, bc);

	return bc;
}
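
/*
 * Fill the user-visible sg_io_v4 header from a completed request: device,
 * transport and driver status, sense data (capped at max_response_len)
 * and residual counts, then unmap and release the request(s).
 */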
static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
				    struct bio *bio, struct bio *bidi_bio)
{
	int ret = 0;

	dprintk("rq %p bio %p 0x%x\n", rq, bio, rq->errors);
	/*
	 * fill in all the output members
	 */
	hdr->device_status = status_byte(rq->errors);
	hdr->transport_status = host_byte(rq->errors);
	hdr->driver_status = driver_byte(rq->errors);
	hdr->info = 0;
	if (hdr->device_status || hdr->transport_status || hdr->driver_status)
		hdr->info |= SG_INFO_CHECK;
	hdr->response_len = 0;

	if (rq->sense_len && hdr->response) {
		int len = min_t(unsigned int, hdr->max_response_len,
					rq->sense_len);

		ret = copy_to_user((void *)(unsigned long)hdr->response,
				   rq->sense, len);
		if (!ret)
			hdr->response_len = len;
		else
			ret = -EFAULT;
	}

	if (rq->next_rq) {
		hdr->dout_resid = rq->resid_len;
		hdr->din_resid = rq->next_rq->resid_len;
		blk_rq_unmap_user(bidi_bio);
		blk_put_request(rq->next_rq);
	} else if (rq_data_dir(rq) == READ)
		hdr->din_resid = rq->resid_len;
	else
		hdr->dout_resid = rq->resid_len;

	/*
	 * If the request generated a negative error number, return it
	 * (providing we aren't already returning an error); if it's
	 * just a protocol response (i.e. non negative), that gets
	 * processed above.
	 */
	if (!ret && rq->errors < 0)
		ret = rq->errors;

	blk_rq_unmap_user(bio);
	if (rq->cmd != rq->__cmd)
		kfree(rq->cmd);
	blk_put_request(rq);

	return ret;
}
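
/*
 * Teardown helper: wait until everything queued on this device has
 * completed, then reap and discard the completions.
 */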
static int bsg_complete_all_commands(struct bsg_device *bd)
{
	struct bsg_command *bc;
	int ret, tret;

	dprintk("%s: entered\n", bd->name);

	/*
	 * wait for all commands to complete
	 */
	ret = 0;
	do {
		ret = bsg_io_schedule(bd);
		/*
		 * look for -ENODATA specifically -- we'll sometimes get
		 * -ERESTARTSYS when we've taken a signal, but we can't
		 * return until we're done freeing the queue, so ignore
		 * it.  The signal will get handled when we're done freeing
		 * the bsg_device.
		 */
	} while (ret != -ENODATA);

	/*
	 * discard done commands
	 */
	ret = 0;
	do {
		spin_lock_irq(&bd->lock);
		if (!bd->queued_cmds) {
			spin_unlock_irq(&bd->lock);
			break;
		}
		spin_unlock_irq(&bd->lock);

		bc = bsg_get_done_cmd(bd);
		if (IS_ERR(bc))
			break;

		tret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
						bc->bidi_bio);
		if (!ret)
			ret = tret;

		bsg_free_command(bc);
	} while (1);

	return ret;
}
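
/*
 * Reap up to count / sizeof(struct sg_io_v4) completed commands and copy
 * their headers back to the user buffer; the iov argument is unused here.
 */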
static int
__bsg_read(char __user *buf, size_t count, struct bsg_device *bd,
	   const struct iovec *iov, ssize_t *bytes_read)
{
	struct bsg_command *bc;
	int nr_commands, ret;

	if (count % sizeof(struct sg_io_v4))
		return -EINVAL;

	ret = 0;
	nr_commands = count / sizeof(struct sg_io_v4);
	while (nr_commands) {
		bc = bsg_get_done_cmd(bd);
		if (IS_ERR(bc)) {
			ret = PTR_ERR(bc);
			break;
		}

		/*
		 * this is the only case where we need to copy data back
		 * after completing the request. so do that here,
		 * bsg_complete_work() cannot do that for us
		 */
		ret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
					       bc->bidi_bio);

		if (copy_to_user(buf, &bc->hdr, sizeof(bc->hdr)))
			ret = -EFAULT;

		bsg_free_command(bc);

		if (ret)
			break;

		buf += sizeof(struct sg_io_v4);
		*bytes_read += sizeof(struct sg_io_v4);
		nr_commands--;
	}

	return ret;
}
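
/*
 * Mirror the file's O_NONBLOCK flag into BSG_F_BLOCK so the wait paths
 * know whether they may sleep.
 */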
static inline void bsg_set_block(struct bsg_device *bd, struct file *file)
{
	if (file->f_flags & O_NONBLOCK)
		clear_bit(BSG_F_BLOCK, &bd->flags);
	else
		set_bit(BSG_F_BLOCK, &bd->flags);
}

/*
 * Check if the error is a "real" error that we should return.
 */
static inline int err_block_err(int ret)
{
	if (ret && ret != -ENOSPC && ret != -ENODATA && ret != -EAGAIN)
		return 1;

	return 0;
}

static ssize_t
bsg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	struct bsg_device *bd = file->private_data;
	int ret;
	ssize_t bytes_read;

	dprintk("%s: read %Zd bytes\n", bd->name, count);

	bsg_set_block(bd, file);

	bytes_read = 0;
	ret = __bsg_read(buf, count, bd, NULL, &bytes_read);
	*ppos = bytes_read;

	if (!bytes_read || (bytes_read && err_block_err(ret)))
		bytes_read = ret;

	return bytes_read;
}
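
/*
 * Parse one or more sg_io_v4 headers from the user buffer, map each onto
 * a request and queue it asynchronously; *bytes_written counts the
 * headers that were actually submitted.
 */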
static int __bsg_write(struct bsg_device *bd, const char __user *buf,
		       size_t count, ssize_t *bytes_written,
		       fmode_t has_write_perm)
{
	struct bsg_command *bc;
	struct request *rq;
	int ret, nr_commands;

	if (count % sizeof(struct sg_io_v4))
		return -EINVAL;

	nr_commands = count / sizeof(struct sg_io_v4);
	rq = NULL;
	bc = NULL;
	ret = 0;
	while (nr_commands) {
		struct request_queue *q = bd->queue;

		bc = bsg_alloc_command(bd);
		if (IS_ERR(bc)) {
			ret = PTR_ERR(bc);
			bc = NULL;
			break;
		}

		if (copy_from_user(&bc->hdr, buf, sizeof(bc->hdr))) {
			ret = -EFAULT;
			break;
		}

		/*
		 * get a request, fill in the blanks, and add to request queue
		 */
		rq = bsg_map_hdr(bd, &bc->hdr, has_write_perm, bc->sense);
		if (IS_ERR(rq)) {
			ret = PTR_ERR(rq);
			rq = NULL;
			break;
		}

		bsg_add_command(bd, q, bc, rq);
		bc = NULL;
		rq = NULL;
		nr_commands--;
		buf += sizeof(struct sg_io_v4);
		*bytes_written += sizeof(struct sg_io_v4);
	}

	if (bc)
		bsg_free_command(bc);

	return ret;
}

static ssize_t
bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
	struct bsg_device *bd = file->private_data;
	ssize_t bytes_written;
	int ret;

	dprintk("%s: write %Zd bytes\n", bd->name, count);

	bsg_set_block(bd, file);

	bytes_written = 0;
	ret = __bsg_write(bd, buf, count, &bytes_written,
			  file->f_mode & FMODE_WRITE);

	*ppos = bytes_written;

	/*
	 * return bytes written on non-fatal errors
	 */
	if (!bytes_written || (bytes_written && err_block_err(ret)))
		bytes_written = ret;

	dprintk("%s: returning %Zd\n", bd->name, bytes_written);
	return bytes_written;
}

static struct bsg_device *bsg_alloc_device(void)
{
	struct bsg_device *bd;

	bd = kzalloc(sizeof(struct bsg_device), GFP_KERNEL);
	if (unlikely(!bd))
		return NULL;

	spin_lock_init(&bd->lock);

	bd->max_queue = BSG_DEFAULT_CMDS;

	INIT_LIST_HEAD(&bd->busy_list);
	INIT_LIST_HEAD(&bd->done_list);
	INIT_HLIST_NODE(&bd->dev_list);

	init_waitqueue_head(&bd->wq_free);
	init_waitqueue_head(&bd->wq_done);
	return bd;
}
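
/*
 * Final put on a bsg_class_device: invoke the owner's release hook, if
 * any, then drop the reference on the parent device.
 */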
static void bsg_kref_release_function(struct kref *kref)
{
	struct bsg_class_device *bcd =
		container_of(kref, struct bsg_class_device, ref);
	struct device *parent = bcd->parent;

	if (bcd->release)
		bcd->release(bcd->parent);

	put_device(parent);
}

static int bsg_put_device(struct bsg_device *bd)
{
	int ret = 0, do_free;
	struct request_queue *q = bd->queue;

	mutex_lock(&bsg_mutex);

	do_free = atomic_dec_and_test(&bd->ref_count);
	if (!do_free) {
		mutex_unlock(&bsg_mutex);
		goto out;
	}

	hlist_del(&bd->dev_list);
	mutex_unlock(&bsg_mutex);

	dprintk("%s: tearing down\n", bd->name);

	/*
	 * close can always block
	 */
	set_bit(BSG_F_BLOCK, &bd->flags);

	/*
	 * correct error detection baddies here again. it's the responsibility
	 * of the app to properly reap commands before close() if it wants
	 * fool-proof error detection
	 */
	ret = bsg_complete_all_commands(bd);

	kfree(bd);
out:
	kref_put(&q->bsg_dev.ref, bsg_kref_release_function);
	if (do_free)
		blk_put_queue(q);
	return ret;
}
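
/*
 * Allocate a bsg_device for this queue, pin the queue, and hash the new
 * device by the inode's minor so later opens can find it.
 */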
static struct bsg_device *bsg_add_device(struct inode *inode,
					 struct request_queue *rq,
					 struct file *file)
{
	struct bsg_device *bd;
	int ret;
#ifdef BSG_DEBUG
	unsigned char buf[32];
#endif
	ret = blk_get_queue(rq);
	if (ret)
		return ERR_PTR(-ENXIO);

	bd = bsg_alloc_device();
	if (!bd) {
		blk_put_queue(rq);
		return ERR_PTR(-ENOMEM);
	}

	bd->queue = rq;

	bsg_set_block(bd, file);

	atomic_set(&bd->ref_count, 1);
	mutex_lock(&bsg_mutex);
	hlist_add_head(&bd->dev_list, bsg_dev_idx_hash(iminor(inode)));

	strncpy(bd->name, dev_name(rq->bsg_dev.class_dev), sizeof(bd->name) - 1);
	dprintk("bound to <%s>, max queue %d\n",
		format_dev_t(buf, inode->i_rdev), bd->max_queue);

	mutex_unlock(&bsg_mutex);
	return bd;
}

static struct bsg_device *__bsg_get_device(int minor, struct request_queue *q)
{
	struct bsg_device *bd;
	struct hlist_node *entry;

	mutex_lock(&bsg_mutex);

	hlist_for_each_entry(bd, entry, bsg_dev_idx_hash(minor), dev_list) {
		if (bd->queue == q) {
			atomic_inc(&bd->ref_count);
			goto found;
		}
	}
	bd = NULL;
found:
	mutex_unlock(&bsg_mutex);
	return bd;
}
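
/*
 * Look up the registered bsg_class_device for this minor and return an
 * existing bsg_device for its queue, or create a fresh one.
 */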
static struct bsg_device *bsg_get_device(struct inode *inode, struct file *file)
{
	struct bsg_device *bd;
	struct bsg_class_device *bcd;

	/*
	 * find the class device
	 */
	mutex_lock(&bsg_mutex);
	bcd = idr_find(&bsg_minor_idr, iminor(inode));
	if (bcd)
		kref_get(&bcd->ref);
	mutex_unlock(&bsg_mutex);

	if (!bcd)
		return ERR_PTR(-ENODEV);

	bd = __bsg_get_device(iminor(inode), bcd->queue);
	if (bd)
		return bd;

	bd = bsg_add_device(inode, bcd->queue, file);
	if (IS_ERR(bd))
		kref_put(&bcd->ref, bsg_kref_release_function);

	return bd;
}

static int bsg_open(struct inode *inode, struct file *file)
{
	struct bsg_device *bd;

	lock_kernel();
	bd = bsg_get_device(inode, file);
	unlock_kernel();

	if (IS_ERR(bd))
		return PTR_ERR(bd);

	file->private_data = bd;
	return 0;
}

static int bsg_release(struct inode *inode, struct file *file)
{
	struct bsg_device *bd = file->private_data;

	file->private_data = NULL;
	return bsg_put_device(bd);
}
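
/*
 * POLLIN when completed commands are waiting to be read, POLLOUT while
 * there is still room to queue more commands.
 */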
static unsigned int bsg_poll(struct file *file, poll_table *wait)
{
	struct bsg_device *bd = file->private_data;
	unsigned int mask = 0;

	poll_wait(file, &bd->wq_done, wait);
	poll_wait(file, &bd->wq_free, wait);

	spin_lock_irq(&bd->lock);
	if (!list_empty(&bd->done_list))
		mask |= POLLIN | POLLRDNORM;
	if (bd->queued_cmds < bd->max_queue)
		mask |= POLLOUT;
	spin_unlock_irq(&bd->lock);

	return mask;
}
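
/*
 * Handle bsg's own queue-depth ioctls, pass the classic SCSI/sg ioctls
 * through to scsi_cmd_ioctl(), and run SG_IO synchronously inline.
 */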
static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct bsg_device *bd = file->private_data;
	int __user *uarg = (int __user *) arg;
	int ret;

	switch (cmd) {
		/*
		 * our own ioctls
		 */
	case SG_GET_COMMAND_Q:
		return put_user(bd->max_queue, uarg);
	case SG_SET_COMMAND_Q: {
		int queue;

		if (get_user(queue, uarg))
			return -EFAULT;
		if (queue < 1)
			return -EINVAL;

		spin_lock_irq(&bd->lock);
		bd->max_queue = queue;
		spin_unlock_irq(&bd->lock);
		return 0;
	}

	/*
	 * SCSI/sg ioctls
	 */
	case SG_GET_VERSION_NUM:
	case SCSI_IOCTL_GET_IDLUN:
	case SCSI_IOCTL_GET_BUS_NUMBER:
	case SG_SET_TIMEOUT:
	case SG_GET_TIMEOUT:
	case SG_GET_RESERVED_SIZE:
	case SG_SET_RESERVED_SIZE:
	case SG_EMULATED_HOST:
	case SCSI_IOCTL_SEND_COMMAND: {
		void __user *uarg = (void __user *) arg;
		return scsi_cmd_ioctl(bd->queue, NULL, file->f_mode, cmd, uarg);
	}
	case SG_IO: {
		struct request *rq;
		struct bio *bio, *bidi_bio = NULL;
		struct sg_io_v4 hdr;
		int at_head;
		u8 sense[SCSI_SENSE_BUFFERSIZE];

		if (copy_from_user(&hdr, uarg, sizeof(hdr)))
			return -EFAULT;

		rq = bsg_map_hdr(bd, &hdr, file->f_mode & FMODE_WRITE, sense);
		if (IS_ERR(rq))
			return PTR_ERR(rq);

		bio = rq->bio;
		if (rq->next_rq)
			bidi_bio = rq->next_rq->bio;

		at_head = (0 == (hdr.flags & BSG_FLAG_Q_AT_TAIL));
		blk_execute_rq(bd->queue, NULL, rq, at_head);
		ret = blk_complete_sgv4_hdr_rq(rq, &hdr, bio, bidi_bio);

		if (copy_to_user(uarg, &hdr, sizeof(hdr)))
			return -EFAULT;

		return ret;
	}
	/*
	 * block device ioctls
	 */
	default:
#if 0
		return ioctl_by_bdev(bd->bdev, cmd, arg);
#else
		return -ENOTTY;
#endif
	}
}

static const struct file_operations bsg_fops = {
	.read		=	bsg_read,
	.write		=	bsg_write,
	.poll		=	bsg_poll,
	.open		=	bsg_open,
	.release	=	bsg_release,
	.unlocked_ioctl	=	bsg_ioctl,
	.owner		=	THIS_MODULE,
};
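
/*
 * Tear down the class device and minor registered for a queue; a no-op
 * if bsg_register_queue() never attached one.
 */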
void bsg_unregister_queue(struct request_queue *q)
{
	struct bsg_class_device *bcd = &q->bsg_dev;

	if (!bcd->class_dev)
		return;

	mutex_lock(&bsg_mutex);
	idr_remove(&bsg_minor_idr, bcd->minor);
	sysfs_remove_link(&q->kobj, "bsg");
	device_unregister(bcd->class_dev);
	bcd->class_dev = NULL;
	kref_put(&bcd->ref, bsg_kref_release_function);
	mutex_unlock(&bsg_mutex);
}
EXPORT_SYMBOL_GPL(bsg_unregister_queue);
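
/*
 * Allocate a minor for this queue, create the bsg class device named
 * after the parent (or the caller-supplied name) and link it from the
 * queue's sysfs directory.
 */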
int bsg_register_queue(struct request_queue *q, struct device *parent,
		       const char *name, void (*release)(struct device *))
{
	struct bsg_class_device *bcd;
	dev_t dev;
	int ret, minor;
	struct device *class_dev = NULL;
	const char *devname;

	if (name)
		devname = name;
	else
		devname = dev_name(parent);

	/*
	 * we need a proper transport to send commands, not a stacked device
	 */
	if (!q->request_fn)
		return 0;

	bcd = &q->bsg_dev;
	memset(bcd, 0, sizeof(*bcd));

	mutex_lock(&bsg_mutex);

	ret = idr_pre_get(&bsg_minor_idr, GFP_KERNEL);
	if (!ret) {
		ret = -ENOMEM;
		goto unlock;
	}

	ret = idr_get_new(&bsg_minor_idr, bcd, &minor);
	if (ret < 0)
		goto unlock;

	if (minor >= BSG_MAX_DEVS) {
		printk(KERN_ERR "bsg: too many bsg devices\n");
		ret = -EINVAL;
		goto remove_idr;
	}

	bcd->minor = minor;
	bcd->queue = q;
	bcd->parent = get_device(parent);
	bcd->release = release;
	kref_init(&bcd->ref);
	dev = MKDEV(bsg_major, bcd->minor);
	class_dev = device_create(bsg_class, parent, dev, NULL, "%s", devname);
	if (IS_ERR(class_dev)) {
		ret = PTR_ERR(class_dev);
		goto put_dev;
	}
	bcd->class_dev = class_dev;

	if (q->kobj.sd) {
		ret = sysfs_create_link(&q->kobj, &bcd->class_dev->kobj, "bsg");
		if (ret)
			goto unregister_class_dev;
	}

	mutex_unlock(&bsg_mutex);
	return 0;

unregister_class_dev:
	device_unregister(class_dev);
put_dev:
	put_device(parent);
remove_idr:
	idr_remove(&bsg_minor_idr, minor);
unlock:
	mutex_unlock(&bsg_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(bsg_register_queue);

static struct cdev bsg_cdev;

static char *bsg_devnode(struct device *dev, mode_t *mode)
{
	return kasprintf(GFP_KERNEL, "bsg/%s", dev_name(dev));
}
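
/*
 * Module init: create the command slab, the "bsg" class (nodes appear as
 * /dev/bsg/<name>) and a char dev region spanning BSG_MAX_DEVS minors.
 */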
static int __init bsg_init(void)
{
	int ret, i;
	dev_t devid;

	bsg_cmd_cachep = kmem_cache_create("bsg_cmd",
				sizeof(struct bsg_command), 0, 0, NULL);
	if (!bsg_cmd_cachep) {
		printk(KERN_ERR "bsg: failed creating slab cache\n");
		return -ENOMEM;
	}

	for (i = 0; i < BSG_LIST_ARRAY_SIZE; i++)
		INIT_HLIST_HEAD(&bsg_device_list[i]);

	bsg_class = class_create(THIS_MODULE, "bsg");
	if (IS_ERR(bsg_class)) {
		ret = PTR_ERR(bsg_class);
		goto destroy_kmemcache;
	}
	bsg_class->devnode = bsg_devnode;

	ret = alloc_chrdev_region(&devid, 0, BSG_MAX_DEVS, "bsg");
	if (ret)
		goto destroy_bsg_class;

	bsg_major = MAJOR(devid);

	cdev_init(&bsg_cdev, &bsg_fops);
	ret = cdev_add(&bsg_cdev, MKDEV(bsg_major, 0), BSG_MAX_DEVS);
	if (ret)
		goto unregister_chrdev;

	printk(KERN_INFO BSG_DESCRIPTION " version " BSG_VERSION
	       " loaded (major %d)\n", bsg_major);
	return 0;
unregister_chrdev:
	unregister_chrdev_region(MKDEV(bsg_major, 0), BSG_MAX_DEVS);
destroy_bsg_class:
	class_destroy(bsg_class);
destroy_kmemcache:
	kmem_cache_destroy(bsg_cmd_cachep);
	return ret;
}

MODULE_AUTHOR("Jens Axboe");
MODULE_DESCRIPTION(BSG_DESCRIPTION);
MODULE_LICENSE("GPL");

device_initcall(bsg_init);