/*
 * Network block device - make block devices work over TCP
 *
 * Note that you can not swap over this thing, yet. Seems to work but
 * deadlocks sometimes - you can not swap over TCP in general.
 *
 * Copyright 1997-2000 Pavel Machek <pavel@ucw.cz>
 * Parts copyright 2001 Steven Whitehouse <steve@chygwyn.com>
 *
 * This file is released under GPLv2 or later.
 *
 * (part of code stolen from loop.c)
 */
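
/*
 * Rough overview: each /dev/nbdN minor is backed by a TCP socket that
 * userspace hands to the driver with the NBD_SET_SOCK ioctl. Block requests
 * are serialized into the NBD wire protocol and sent to the remote server;
 * replies are matched back to their originating requests and completed.
 * Connection setup and teardown are driven entirely from userspace through
 * the ioctls handled in nbd_ioctl() below.
 */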

#include <linux/major.h>

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/ioctl.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <net/sock.h>
#include <linux/net.h>
#include <linux/kthread.h>

#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/types.h>

#include <linux/nbd.h>

#define LO_MAGIC 0x68797548

#ifdef NDEBUG
#define dprintk(flags, fmt...)
#else /* NDEBUG */
#define dprintk(flags, fmt...) do { \
	if (debugflags & (flags)) printk(KERN_DEBUG fmt); \
} while (0)
#define DBG_IOCTL       0x0004
#define DBG_INIT        0x0010
#define DBG_EXIT        0x0020
#define DBG_BLKDEV      0x0100
#define DBG_RX          0x0200
#define DBG_TX          0x0400
static unsigned int debugflags;
#endif /* NDEBUG */

static unsigned int nbds_max = 16;
static struct nbd_device *nbd_dev;

/*
 * Use just one lock (or at most 1 per NIC). Two arguments for this:
 * 1. Each NIC is essentially a synchronization point for all servers
 *    accessed through that NIC so there's no need to have more locks
 *    than NICs anyway.
 * 2. More locks lead to more "Dirty cache line bouncing" which will slow
 *    down each lock to the point where they're actually slower than just
 *    a single lock.
 * Thanks go to Jens Axboe and Al Viro for their LKML emails explaining this!
 */
static DEFINE_SPINLOCK(nbd_lock);

#ifndef NDEBUG
static const char *ioctl_cmd_to_ascii(int cmd)
{
	switch (cmd) {
	case NBD_SET_SOCK: return "set-sock";
	case NBD_SET_BLKSIZE: return "set-blksize";
	case NBD_SET_SIZE: return "set-size";
	case NBD_DO_IT: return "do-it";
	case NBD_CLEAR_SOCK: return "clear-sock";
	case NBD_CLEAR_QUE: return "clear-que";
	case NBD_PRINT_DEBUG: return "print-debug";
	case NBD_SET_SIZE_BLOCKS: return "set-size-blocks";
	case NBD_DISCONNECT: return "disconnect";
	case BLKROSET: return "set-read-only";
	case BLKFLSBUF: return "flush-buffer-cache";
	}
	return "unknown";
}

static const char *nbdcmd_to_ascii(int cmd)
{
	switch (cmd) {
	case NBD_CMD_READ: return "read";
	case NBD_CMD_WRITE: return "write";
	case NBD_CMD_DISC: return "disconnect";
	}
	return "invalid";
}
#endif /* NDEBUG */

static void nbd_end_request(struct request *req)
{
	int error = req->errors ? -EIO : 0;
	struct request_queue *q = req->q;
	unsigned long flags;

	dprintk(DBG_BLKDEV, "%s: request %p: %s\n", req->rq_disk->disk_name,
			req, error ? "failed" : "done");

	spin_lock_irqsave(q->queue_lock, flags);
	__blk_end_request(req, error, req->nr_sectors << 9);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

static void sock_shutdown(struct nbd_device *lo, int lock)
{
	/* Forcibly shutdown the socket causing all listeners
	 * to error
	 *
	 * FIXME: This code is duplicated from sys_shutdown, but
	 * there should be a more generic interface rather than
	 * calling socket ops directly here */
	if (lock)
		mutex_lock(&lo->tx_lock);
	if (lo->sock) {
		printk(KERN_WARNING "%s: shutting down socket\n",
			lo->disk->disk_name);
		kernel_sock_shutdown(lo->sock, SHUT_RDWR);
		lo->sock = NULL;
	}
	if (lock)
		mutex_unlock(&lo->tx_lock);
}

static void nbd_xmit_timeout(unsigned long arg)
{
	struct task_struct *task = (struct task_struct *)arg;

	printk(KERN_WARNING "nbd: killing hung xmit (%s, pid: %d)\n",
		task->comm, task->pid);
	force_sig(SIGKILL, task);
}
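
/*
 * The transmit timeout works by arming a one-shot timer around each
 * kernel_sendmsg() call in sock_xmit() whenever lo->xmit_timeout is set
 * (see NBD_SET_TIMEOUT). If the timer fires, nbd_xmit_timeout() above
 * SIGKILLs the transmitting task; sock_xmit() then notices the pending
 * signal, returns -EINTR and shuts the socket down, failing the hung
 * connection instead of blocking forever.
 */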

/*
 *  Send or receive packet.
 */
static int sock_xmit(struct nbd_device *lo, int send, void *buf, int size,
		int msg_flags)
{
	struct socket *sock = lo->sock;
	int result;
	struct msghdr msg;
	struct kvec iov;
	sigset_t blocked, oldset;

	if (unlikely(!sock)) {
		printk(KERN_ERR "%s: Attempted %s on closed socket in sock_xmit\n",
		       lo->disk->disk_name, (send ? "send" : "recv"));
		return -EINVAL;
	}

	/* Allow interception of SIGKILL only
	 * Don't allow other signals to interrupt the transmission */
	siginitsetinv(&blocked, sigmask(SIGKILL));
	sigprocmask(SIG_SETMASK, &blocked, &oldset);

	do {
		sock->sk->sk_allocation = GFP_NOIO;
		iov.iov_base = buf;
		iov.iov_len = size;
		msg.msg_name = NULL;
		msg.msg_namelen = 0;
		msg.msg_control = NULL;
		msg.msg_controllen = 0;
		msg.msg_flags = msg_flags | MSG_NOSIGNAL;

		if (send) {
			struct timer_list ti;

			if (lo->xmit_timeout) {
				init_timer(&ti);
				ti.function = nbd_xmit_timeout;
				ti.data = (unsigned long)current;
				ti.expires = jiffies + lo->xmit_timeout;
				add_timer(&ti);
			}
			result = kernel_sendmsg(sock, &msg, &iov, 1, size);
			if (lo->xmit_timeout)
				del_timer_sync(&ti);
		} else
			result = kernel_recvmsg(sock, &msg, &iov, 1, size, 0);

		if (signal_pending(current)) {
			siginfo_t info;
			printk(KERN_WARNING "nbd (pid %d: %s) got signal %d\n",
				task_pid_nr(current), current->comm,
				dequeue_signal_lock(current, &current->blocked, &info));
			result = -EINTR;
			sock_shutdown(lo, !send);
			break;
		}

		if (result <= 0) {
			if (result == 0)
				result = -EPIPE; /* short read */
			break;
		}
		size -= result;
		buf += result;
	} while (size > 0);

	sigprocmask(SIG_SETMASK, &oldset, NULL);

	return result;
}

static inline int sock_send_bvec(struct nbd_device *lo, struct bio_vec *bvec,
		int flags)
{
	int result;
	void *kaddr = kmap(bvec->bv_page);
	result = sock_xmit(lo, 1, kaddr + bvec->bv_offset, bvec->bv_len, flags);
	kunmap(bvec->bv_page);
	return result;
}

/* always call with the tx_lock held */
static int nbd_send_req(struct nbd_device *lo, struct request *req)
{
	int result, flags;
	struct nbd_request request;
	unsigned long size = req->nr_sectors << 9;

	request.magic = htonl(NBD_REQUEST_MAGIC);
	request.type = htonl(nbd_cmd(req));
	request.from = cpu_to_be64((u64) req->sector << 9);
	request.len = htonl(size);
	memcpy(request.handle, &req, sizeof(req));

	dprintk(DBG_TX, "%s: request %p: sending control (%s@%llu,%luB)\n",
			lo->disk->disk_name, req,
			nbdcmd_to_ascii(nbd_cmd(req)),
			(unsigned long long)req->sector << 9,
			req->nr_sectors << 9);
	result = sock_xmit(lo, 1, &request, sizeof(request),
			(nbd_cmd(req) == NBD_CMD_WRITE) ? MSG_MORE : 0);
	if (result <= 0) {
		printk(KERN_ERR "%s: Send control failed (result %d)\n",
				lo->disk->disk_name, result);
		goto error_out;
	}

	if (nbd_cmd(req) == NBD_CMD_WRITE) {
		struct req_iterator iter;
		struct bio_vec *bvec;
		/*
		 * we are really probing at internals to determine
		 * whether to set MSG_MORE or not...
		 */
		rq_for_each_segment(bvec, req, iter) {
			flags = 0;
			if (!rq_iter_last(req, iter))
				flags = MSG_MORE;
			dprintk(DBG_TX, "%s: request %p: sending %d bytes data\n",
					lo->disk->disk_name, req, bvec->bv_len);
			result = sock_send_bvec(lo, bvec, flags);
			if (result <= 0) {
				printk(KERN_ERR "%s: Send data failed (result %d)\n",
						lo->disk->disk_name, result);
				goto error_out;
			}
		}
	}
	return 0;

error_out:
	return 1;
}

static struct request *nbd_find_request(struct nbd_device *lo,
					struct request *xreq)
{
	struct request *req, *tmp;
	int err;

	err = wait_event_interruptible(lo->active_wq, lo->active_req != xreq);
	if (unlikely(err))
		goto out;

	spin_lock(&lo->queue_lock);
	list_for_each_entry_safe(req, tmp, &lo->queue_head, queuelist) {
		if (req != xreq)
			continue;
		list_del_init(&req->queuelist);
		spin_unlock(&lo->queue_lock);
		return req;
	}
	spin_unlock(&lo->queue_lock);

	err = -ENOENT;

out:
	return ERR_PTR(err);
}

static inline int sock_recv_bvec(struct nbd_device *lo, struct bio_vec *bvec)
{
	int result;
	void *kaddr = kmap(bvec->bv_page);
	result = sock_xmit(lo, 0, kaddr + bvec->bv_offset, bvec->bv_len,
			MSG_WAITALL);
	kunmap(bvec->bv_page);
	return result;
}

/* NULL returned = something went wrong, inform userspace */
static struct request *nbd_read_stat(struct nbd_device *lo)
{
	int result;
	struct nbd_reply reply;
	struct request *req;

	reply.magic = 0;
	result = sock_xmit(lo, 0, &reply, sizeof(reply), MSG_WAITALL);
	if (result <= 0) {
		printk(KERN_ERR "%s: Receive control failed (result %d)\n",
				lo->disk->disk_name, result);
		goto harderror;
	}

	if (ntohl(reply.magic) != NBD_REPLY_MAGIC) {
		printk(KERN_ERR "%s: Wrong magic (0x%lx)\n",
				lo->disk->disk_name,
				(unsigned long)ntohl(reply.magic));
		result = -EPROTO;
		goto harderror;
	}

	req = nbd_find_request(lo, *(struct request **)reply.handle);
	if (unlikely(IS_ERR(req))) {
		result = PTR_ERR(req);
		if (result != -ENOENT)
			goto harderror;

		printk(KERN_ERR "%s: Unexpected reply (%p)\n",
				lo->disk->disk_name, reply.handle);
		result = -EBADR;
		goto harderror;
	}

	if (ntohl(reply.error)) {
		printk(KERN_ERR "%s: Other side returned error (%d)\n",
				lo->disk->disk_name, ntohl(reply.error));
		req->errors++;
		return req;
	}

	dprintk(DBG_RX, "%s: request %p: got reply\n",
			lo->disk->disk_name, req);
	if (nbd_cmd(req) == NBD_CMD_READ) {
		struct req_iterator iter;
		struct bio_vec *bvec;

		rq_for_each_segment(bvec, req, iter) {
			result = sock_recv_bvec(lo, bvec);
			if (result <= 0) {
				printk(KERN_ERR "%s: Receive data failed (result %d)\n",
						lo->disk->disk_name, result);
				req->errors++;
				return req;
			}
			dprintk(DBG_RX, "%s: request %p: got %d bytes data\n",
				lo->disk->disk_name, req, bvec->bv_len);
		}
	}
	return req;
harderror:
	lo->harderror = result;
	return NULL;
}

static ssize_t pid_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);

	return sprintf(buf, "%ld\n",
		(long) ((struct nbd_device *)disk->private_data)->pid);
}

static struct device_attribute pid_attr = {
	.attr = { .name = "pid", .mode = S_IRUGO, .owner = THIS_MODULE },
	.show = pid_show,
};

static int nbd_do_it(struct nbd_device *lo)
{
	struct request *req;
	int ret;

	BUG_ON(lo->magic != LO_MAGIC);

	lo->pid = current->pid;
	ret = sysfs_create_file(&lo->disk->dev.kobj, &pid_attr.attr);
	if (ret) {
		printk(KERN_ERR "nbd: sysfs_create_file failed!");
		return ret;
	}

	while ((req = nbd_read_stat(lo)) != NULL)
		nbd_end_request(req);

	sysfs_remove_file(&lo->disk->dev.kobj, &pid_attr.attr);
	return 0;
}

static void nbd_clear_que(struct nbd_device *lo)
{
	struct request *req;

	BUG_ON(lo->magic != LO_MAGIC);

	/*
	 * Because we have set lo->sock to NULL under the tx_lock, all
	 * modifications to the list must have completed by now. For
	 * the same reason, the active_req must be NULL.
	 *
	 * As a consequence, we don't need to take the spin lock while
	 * purging the list here.
	 */
	BUG_ON(lo->sock);
	BUG_ON(lo->active_req);

	while (!list_empty(&lo->queue_head)) {
		req = list_entry(lo->queue_head.next, struct request,
				 queuelist);
		list_del_init(&req->queuelist);
		req->errors++;
		nbd_end_request(req);
	}
}

static void nbd_handle_req(struct nbd_device *lo, struct request *req)
{
	if (!blk_fs_request(req))
		goto error_out;

	nbd_cmd(req) = NBD_CMD_READ;
	if (rq_data_dir(req) == WRITE) {
		nbd_cmd(req) = NBD_CMD_WRITE;
		if (lo->flags & NBD_READ_ONLY) {
			printk(KERN_ERR "%s: Write on read-only\n",
					lo->disk->disk_name);
			goto error_out;
		}
	}

	req->errors = 0;

	mutex_lock(&lo->tx_lock);
	if (unlikely(!lo->sock)) {
		mutex_unlock(&lo->tx_lock);
		printk(KERN_ERR "%s: Attempted send on closed socket\n",
		       lo->disk->disk_name);
		req->errors++;
		nbd_end_request(req);
		return;
	}

	lo->active_req = req;

	if (nbd_send_req(lo, req) != 0) {
		printk(KERN_ERR "%s: Request send failed\n",
				lo->disk->disk_name);
		req->errors++;
		nbd_end_request(req);
	} else {
		spin_lock(&lo->queue_lock);
		list_add(&req->queuelist, &lo->queue_head);
		spin_unlock(&lo->queue_lock);
	}

	lo->active_req = NULL;
	mutex_unlock(&lo->tx_lock);
	wake_up_all(&lo->active_wq);

	return;

error_out:
	req->errors++;
	nbd_end_request(req);
}

static int nbd_thread(void *data)
{
	struct nbd_device *lo = data;
	struct request *req;

	set_user_nice(current, -20);
	while (!kthread_should_stop() || !list_empty(&lo->waiting_queue)) {
		/* wait for something to do */
		wait_event_interruptible(lo->waiting_wq,
					 kthread_should_stop() ||
					 !list_empty(&lo->waiting_queue));

		/* extract request */
		if (list_empty(&lo->waiting_queue))
			continue;

		spin_lock_irq(&lo->queue_lock);
		req = list_entry(lo->waiting_queue.next, struct request,
				 queuelist);
		list_del_init(&req->queuelist);
		spin_unlock_irq(&lo->queue_lock);

		/* handle request */
		nbd_handle_req(lo, req);
	}
	return 0;
}

/*
 *  We always wait for result of write, for now. It would be nice to make it optional
 *  in future
 *  if ((rq_data_dir(req) == WRITE) && (lo->flags & NBD_WRITE_NOCHK))
 *   { printk( "Warning: Ignoring result!\n"); nbd_end_request( req ); }
 */

static void do_nbd_request(struct request_queue *q)
{
	struct request *req;

	while ((req = elv_next_request(q)) != NULL) {
		struct nbd_device *lo;

		blkdev_dequeue_request(req);

		spin_unlock_irq(q->queue_lock);

		dprintk(DBG_BLKDEV, "%s: request %p: dequeued (flags=%x)\n",
				req->rq_disk->disk_name, req, req->cmd_type);

		lo = req->rq_disk->private_data;

		BUG_ON(lo->magic != LO_MAGIC);

		spin_lock_irq(&lo->queue_lock);
		list_add_tail(&req->queuelist, &lo->waiting_queue);
		spin_unlock_irq(&lo->queue_lock);

		wake_up(&lo->waiting_wq);

		spin_lock_irq(q->queue_lock);
	}
}

static int nbd_ioctl(struct inode *inode, struct file *file,
		     unsigned int cmd, unsigned long arg)
{
	struct nbd_device *lo = inode->i_bdev->bd_disk->private_data;
	int error;
	struct request sreq;
	struct task_struct *thread;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	BUG_ON(lo->magic != LO_MAGIC);

	/* Anyone capable of this syscall can do *real bad* things */
	dprintk(DBG_IOCTL, "%s: nbd_ioctl cmd=%s(0x%x) arg=%lu\n",
			lo->disk->disk_name, ioctl_cmd_to_ascii(cmd), cmd, arg);

	switch (cmd) {
	case NBD_DISCONNECT:
		printk(KERN_INFO "%s: NBD_DISCONNECT\n", lo->disk->disk_name);
		sreq.cmd_type = REQ_TYPE_SPECIAL;
		nbd_cmd(&sreq) = NBD_CMD_DISC;
		/*
		 * Set these to sane values in case server implementation
		 * fails to check the request type first and also to keep
		 * debugging output cleaner.
		 */
		sreq.sector = 0;
		sreq.nr_sectors = 0;
		if (!lo->sock)
			return -EINVAL;
		mutex_lock(&lo->tx_lock);
		nbd_send_req(lo, &sreq);
		mutex_unlock(&lo->tx_lock);
		return 0;

	case NBD_CLEAR_SOCK:
		error = 0;
		mutex_lock(&lo->tx_lock);
		lo->sock = NULL;
		mutex_unlock(&lo->tx_lock);
		file = lo->file;
		lo->file = NULL;
		nbd_clear_que(lo);
		BUG_ON(!list_empty(&lo->queue_head));
		if (file)
			fput(file);
		return error;
	case NBD_SET_SOCK:
		if (lo->file)
			return -EBUSY;
		error = -EINVAL;
		file = fget(arg);
		if (file) {
			inode = file->f_path.dentry->d_inode;
			if (S_ISSOCK(inode->i_mode)) {
				lo->file = file;
				lo->sock = SOCKET_I(inode);
				error = 0;
			} else {
				fput(file);
			}
		}
		return error;
	case NBD_SET_BLKSIZE:
		lo->blksize = arg;
		lo->bytesize &= ~(lo->blksize-1);
		inode->i_bdev->bd_inode->i_size = lo->bytesize;
		set_blocksize(inode->i_bdev, lo->blksize);
		set_capacity(lo->disk, lo->bytesize >> 9);
		return 0;
	case NBD_SET_SIZE:
		lo->bytesize = arg & ~(lo->blksize-1);
		inode->i_bdev->bd_inode->i_size = lo->bytesize;
		set_blocksize(inode->i_bdev, lo->blksize);
		set_capacity(lo->disk, lo->bytesize >> 9);
		return 0;
	case NBD_SET_TIMEOUT:
		lo->xmit_timeout = arg * HZ;
		return 0;
	case NBD_SET_SIZE_BLOCKS:
		lo->bytesize = ((u64) arg) * lo->blksize;
		inode->i_bdev->bd_inode->i_size = lo->bytesize;
		set_blocksize(inode->i_bdev, lo->blksize);
		set_capacity(lo->disk, lo->bytesize >> 9);
		return 0;
	case NBD_DO_IT:
		if (!lo->file)
			return -EINVAL;
		thread = kthread_create(nbd_thread, lo, lo->disk->disk_name);
		if (IS_ERR(thread))
			return PTR_ERR(thread);
		wake_up_process(thread);
		error = nbd_do_it(lo);
		kthread_stop(thread);
		if (error)
			return error;
		sock_shutdown(lo, 1);
		file = lo->file;
		lo->file = NULL;
		nbd_clear_que(lo);
		printk(KERN_WARNING "%s: queue cleared\n", lo->disk->disk_name);
		if (file)
			fput(file);
		lo->bytesize = 0;
		inode->i_bdev->bd_inode->i_size = 0;
		set_capacity(lo->disk, 0);
		return lo->harderror;
	case NBD_CLEAR_QUE:
		/*
		 * This is for compatibility only.  The queue is always cleared
		 * by NBD_DO_IT or NBD_CLEAR_SOCK.
		 */
		BUG_ON(!lo->sock && !list_empty(&lo->queue_head));
		return 0;
	case NBD_PRINT_DEBUG:
		printk(KERN_INFO "%s: next = %p, prev = %p, head = %p\n",
			inode->i_bdev->bd_disk->disk_name,
			lo->queue_head.next, lo->queue_head.prev,
			&lo->queue_head);
		return 0;
	}
	return -EINVAL;
}
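
/*
 * For orientation, a minimal sketch of how a userspace client typically
 * drives this interface (a hypothetical example, not part of the driver;
 * real clients such as nbd-client also negotiate the export size with the
 * server before sizing the device):
 *
 *	int sk = socket(AF_INET, SOCK_STREAM, 0);
 *	connect(sk, ...);                        // connect to the nbd server
 *	int nbd = open("/dev/nbd0", O_RDWR);
 *	ioctl(nbd, NBD_SET_BLKSIZE, 4096UL);
 *	ioctl(nbd, NBD_SET_SIZE_BLOCKS, blocks); // export size in blocks
 *	ioctl(nbd, NBD_SET_SOCK, sk);            // hand the socket to the kernel
 *	ioctl(nbd, NBD_DO_IT);                   // blocks until disconnect
 *
 * NBD_DO_IT returns only after NBD_DISCONNECT/NBD_CLEAR_SOCK or a socket
 * error, at which point the queue has been cleared and the device sized
 * back to zero (see the NBD_DO_IT case above).
 */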

static struct block_device_operations nbd_fops =
{
	.owner =	THIS_MODULE,
	.ioctl =	nbd_ioctl,
};

/*
 * And here should be modules and kernel interface 
 *  (Just smiley confuses emacs :-)
 */

static int __init nbd_init(void)
{
	int err = -ENOMEM;
	int i;

	BUILD_BUG_ON(sizeof(struct nbd_request) != 28);

	nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL);
	if (!nbd_dev)
		return -ENOMEM;

	for (i = 0; i < nbds_max; i++) {
		struct gendisk *disk = alloc_disk(1);
		elevator_t *old_e;
		if (!disk)
			goto out;
		nbd_dev[i].disk = disk;
		/*
		 * The new linux 2.5 block layer implementation requires
		 * every gendisk to have its very own request_queue struct.
		 * These structs are big so we dynamically allocate them.
		 */
		disk->queue = blk_init_queue(do_nbd_request, &nbd_lock);
		if (!disk->queue) {
			put_disk(disk);
			goto out;
		}
		old_e = disk->queue->elevator;
		if (elevator_init(disk->queue, "deadline") == 0 ||
			elevator_init(disk->queue, "noop") == 0) {
				elevator_exit(old_e);
		}
	}

	if (register_blkdev(NBD_MAJOR, "nbd")) {
		err = -EIO;
		goto out;
	}

	printk(KERN_INFO "nbd: registered device at major %d\n", NBD_MAJOR);
	dprintk(DBG_INIT, "nbd: debugflags=0x%x\n", debugflags);

	for (i = 0; i < nbds_max; i++) {
		struct gendisk *disk = nbd_dev[i].disk;
		nbd_dev[i].file = NULL;
		nbd_dev[i].magic = LO_MAGIC;
		nbd_dev[i].flags = 0;
		INIT_LIST_HEAD(&nbd_dev[i].waiting_queue);
		spin_lock_init(&nbd_dev[i].queue_lock);
		INIT_LIST_HEAD(&nbd_dev[i].queue_head);
		mutex_init(&nbd_dev[i].tx_lock);
		init_waitqueue_head(&nbd_dev[i].active_wq);
		init_waitqueue_head(&nbd_dev[i].waiting_wq);
		nbd_dev[i].blksize = 1024;
		nbd_dev[i].bytesize = 0;
		disk->major = NBD_MAJOR;
		disk->first_minor = i;
		disk->fops = &nbd_fops;
		disk->private_data = &nbd_dev[i];
		disk->flags |= GENHD_FL_SUPPRESS_PARTITION_INFO;
		sprintf(disk->disk_name, "nbd%d", i);
		set_capacity(disk, 0);
		add_disk(disk);
	}

	return 0;
out:
	while (i--) {
		blk_cleanup_queue(nbd_dev[i].disk->queue);
		put_disk(nbd_dev[i].disk);
	}
	return err;
}

static void __exit nbd_cleanup(void)
{
	int i;
	for (i = 0; i < nbds_max; i++) {
		struct gendisk *disk = nbd_dev[i].disk;
		nbd_dev[i].magic = 0;
		if (disk) {
			del_gendisk(disk);
			blk_cleanup_queue(disk->queue);
			put_disk(disk);
		}
	}
	unregister_blkdev(NBD_MAJOR, "nbd");
	printk(KERN_INFO "nbd: unregistered device at major %d\n", NBD_MAJOR);
}

module_init(nbd_init);
module_exit(nbd_cleanup);

MODULE_DESCRIPTION("Network Block Device");
MODULE_LICENSE("GPL");

module_param(nbds_max, int, 0444);
MODULE_PARM_DESC(nbds_max, "How many network block devices to initialize.");
#ifndef NDEBUG
module_param(debugflags, int, 0644);
MODULE_PARM_DESC(debugflags, "flags for controlling debug output");
#endif