/*******************************************************************************
 * Vhost kernel TCM fabric driver for virtio SCSI initiators
 *
 * (C) Copyright 2010-2013 Datera, Inc.
 * (C) Copyright 2010-2012 IBM Corp.
 *
 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
 *
 * Authors: Nicholas A. Bellinger <nab@daterainc.com>
 *          Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 ****************************************************************************/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <generated/utsrelease.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/compat.h>
#include <linux/eventfd.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/target_core_fabric_configfs.h>
#include <target/target_core_configfs.h>
#include <target/configfs_macros.h>
#include <linux/vhost.h>
#include <linux/virtio_scsi.h>
#include <linux/llist.h>
#include <linux/bitmap.h>
#include <linux/percpu_ida.h>

#include "vhost.h"

#define TCM_VHOST_VERSION  "v0.1"
#define TCM_VHOST_NAMELEN 256
#define TCM_VHOST_MAX_CDB_SIZE 32
#define TCM_VHOST_DEFAULT_TAGS 256
#define TCM_VHOST_PREALLOC_SGLS 2048
#define TCM_VHOST_PREALLOC_PAGES 2048

struct vhost_scsi_inflight {
	/* Wait for the flush operation to finish */
	struct completion comp;
	/* Refcount for the inflight reqs */
	struct kref kref;
};

struct tcm_vhost_cmd {
	/* Descriptor from vhost_get_vq_desc() for virt_queue segment */
	int tvc_vq_desc;
	/* virtio-scsi initiator task attribute */
	int tvc_task_attr;
	/* virtio-scsi initiator data direction */
	enum dma_data_direction tvc_data_direction;
	/* Expected data transfer length from virtio-scsi header */
	u32 tvc_exp_data_len;
	/* The Tag from include/linux/virtio_scsi.h:struct virtio_scsi_cmd_req */
	u64 tvc_tag;
	/* The number of scatterlists associated with this cmd */
	u32 tvc_sgl_count;
	/* Saved unpacked SCSI LUN for tcm_vhost_submission_work() */
	u32 tvc_lun;
	/* Pointer to the SGL formatted memory from virtio-scsi */
	struct scatterlist *tvc_sgl;
	struct page **tvc_upages;
	/* Pointer to response */
	struct virtio_scsi_cmd_resp __user *tvc_resp;
	/* Pointer to vhost_scsi for our device */
	struct vhost_scsi *tvc_vhost;
	/* Pointer to vhost_virtqueue for the cmd */
	struct vhost_virtqueue *tvc_vq;
	/* Pointer to vhost nexus memory */
	struct tcm_vhost_nexus *tvc_nexus;
	/* The TCM I/O descriptor that is accessed via container_of() */
	struct se_cmd tvc_se_cmd;
	/* work item used for cmwq dispatch to tcm_vhost_submission_work() */
	struct work_struct work;
	/* Copy of the incoming SCSI command descriptor block (CDB) */
	unsigned char tvc_cdb[TCM_VHOST_MAX_CDB_SIZE];
	/* Sense buffer that will be mapped into outgoing status */
	unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER];
	/* Completed commands list, serviced from vhost worker thread */
	struct llist_node tvc_completion_list;
	/* Used to track inflight cmd */
	struct vhost_scsi_inflight *inflight;
};

struct tcm_vhost_nexus {
	/* Pointer to TCM session for I_T Nexus */
	struct se_session *tvn_se_sess;
};

struct tcm_vhost_nacl {
	/* Binary World Wide unique Port Name for Vhost Initiator port */
	u64 iport_wwpn;
	/* ASCII formatted WWPN for SAS Initiator port */
	char iport_name[TCM_VHOST_NAMELEN];
	/* Returned by tcm_vhost_make_nodeacl() */
	struct se_node_acl se_node_acl;
};

struct tcm_vhost_tpg {
	/* Vhost port target portal group tag for TCM */
	u16 tport_tpgt;
	/* Used to track number of TPG Port/Lun Links wrt explicit I_T Nexus shutdown */
	int tv_tpg_port_count;
	/* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */
	int tv_tpg_vhost_count;
	/* list for tcm_vhost_list */
	struct list_head tv_tpg_list;
	/* Used to protect access for tpg_nexus */
	struct mutex tv_tpg_mutex;
	/* Pointer to the TCM VHost I_T Nexus for this TPG endpoint */
	struct tcm_vhost_nexus *tpg_nexus;
	/* Pointer back to tcm_vhost_tport */
	struct tcm_vhost_tport *tport;
	/* Returned by tcm_vhost_make_tpg() */
	struct se_portal_group se_tpg;
	/* Pointer back to vhost_scsi, protected by tv_tpg_mutex */
	struct vhost_scsi *vhost_scsi;
};

struct tcm_vhost_tport {
	/* SCSI protocol the tport is providing */
	u8 tport_proto_id;
	/* Binary World Wide unique Port Name for Vhost Target port */
	u64 tport_wwpn;
	/* ASCII formatted WWPN for Vhost Target port */
	char tport_name[TCM_VHOST_NAMELEN];
	/* Returned by tcm_vhost_make_tport() */
	struct se_wwn tport_wwn;
};

struct tcm_vhost_evt {
	/* event to be sent to guest */
	struct virtio_scsi_event event;
	/* event list, serviced from vhost worker thread */
	struct llist_node list;
};

enum {
	VHOST_SCSI_VQ_CTL = 0,
	VHOST_SCSI_VQ_EVT = 1,
	VHOST_SCSI_VQ_IO = 2,
};

enum {
	VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG)
};

#define VHOST_SCSI_MAX_TARGET	256
#define VHOST_SCSI_MAX_VQ	128
#define VHOST_SCSI_MAX_EVENT	128

struct vhost_scsi_virtqueue {
	struct vhost_virtqueue vq;
	/*
	 * Reference counting for inflight reqs, used for flush operation. At
	 * each time, one reference tracks new commands submitted, while we
	 * wait for another one to reach 0.
	 */
	struct vhost_scsi_inflight inflights[2];
	/*
	 * Indicate current inflight in use, protected by vq->mutex.
	 * Writers must also take dev mutex and flush under it.
	 */
	int inflight_idx;
};

struct vhost_scsi {
	/* Protected by vhost_scsi->dev.mutex */
	struct tcm_vhost_tpg **vs_tpg;
	char vs_vhost_wwpn[TRANSPORT_IQN_LEN];

	struct vhost_dev dev;
	struct vhost_scsi_virtqueue vqs[VHOST_SCSI_MAX_VQ];

	struct vhost_work vs_completion_work; /* cmd completion work item */
	struct llist_head vs_completion_list; /* cmd completion queue */

	struct vhost_work vs_event_work; /* evt injection work item */
	struct llist_head vs_event_list; /* evt injection queue */

	bool vs_events_missed; /* any missed events, protected by vq->mutex */
	int vs_events_nr; /* num of pending events, protected by vq->mutex */
};

/* Local pointer to allocated TCM configfs fabric module */
static struct target_fabric_configfs *tcm_vhost_fabric_configfs;

static struct workqueue_struct *tcm_vhost_workqueue;

/* Global mutex to protect the tcm_vhost TPG list for vhost IOCTL access */
static DEFINE_MUTEX(tcm_vhost_mutex);
static LIST_HEAD(tcm_vhost_list);
static int iov_num_pages(struct iovec *iov)
{
	return (PAGE_ALIGN((unsigned long)iov->iov_base + iov->iov_len) -
	       ((unsigned long)iov->iov_base & PAGE_MASK)) >> PAGE_SHIFT;
}
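
/*
 * Inflight accounting: each virtqueue keeps two vhost_scsi_inflight
 * counters and flips between them via inflight_idx, so a flush can wait
 * for the old counter to drain to zero while new requests are charged
 * to the new one.
 */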
static void tcm_vhost_done_inflight(struct kref *kref)
{
	struct vhost_scsi_inflight *inflight;

	inflight = container_of(kref, struct vhost_scsi_inflight, kref);
	complete(&inflight->comp);
}

static void tcm_vhost_init_inflight(struct vhost_scsi *vs,
				    struct vhost_scsi_inflight *old_inflight[])
{
	struct vhost_scsi_inflight *new_inflight;
	struct vhost_virtqueue *vq;
	int idx, i;

	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
		vq = &vs->vqs[i].vq;

		mutex_lock(&vq->mutex);

		/* store old inflight */
		idx = vs->vqs[i].inflight_idx;
		if (old_inflight)
			old_inflight[i] = &vs->vqs[i].inflights[idx];

		/* setup new inflight */
		vs->vqs[i].inflight_idx = idx ^ 1;
		new_inflight = &vs->vqs[i].inflights[idx ^ 1];
		kref_init(&new_inflight->kref);
		init_completion(&new_inflight->comp);

		mutex_unlock(&vq->mutex);
	}
}

static struct vhost_scsi_inflight *
tcm_vhost_get_inflight(struct vhost_virtqueue *vq)
{
	struct vhost_scsi_inflight *inflight;
	struct vhost_scsi_virtqueue *svq;

	svq = container_of(vq, struct vhost_scsi_virtqueue, vq);
	inflight = &svq->inflights[svq->inflight_idx];
	kref_get(&inflight->kref);

	return inflight;
}

static void tcm_vhost_put_inflight(struct vhost_scsi_inflight *inflight)
{
	kref_put(&inflight->kref, tcm_vhost_done_inflight);
}

static int tcm_vhost_check_true(struct se_portal_group *se_tpg)
{
	return 1;
}

static int tcm_vhost_check_false(struct se_portal_group *se_tpg)
{
	return 0;
}

static char *tcm_vhost_get_fabric_name(void)
{
	return "vhost";
}

static u8 tcm_vhost_get_fabric_proto_ident(struct se_portal_group *se_tpg)
{
	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
				struct tcm_vhost_tpg, se_tpg);
	struct tcm_vhost_tport *tport = tpg->tport;

	switch (tport->tport_proto_id) {
	case SCSI_PROTOCOL_SAS:
		return sas_get_fabric_proto_ident(se_tpg);
	case SCSI_PROTOCOL_FCP:
		return fc_get_fabric_proto_ident(se_tpg);
	case SCSI_PROTOCOL_ISCSI:
		return iscsi_get_fabric_proto_ident(se_tpg);
	default:
		pr_err("Unknown tport_proto_id: 0x%02x, using"
			" SAS emulation\n", tport->tport_proto_id);
		break;
	}

	return sas_get_fabric_proto_ident(se_tpg);
}

static char *tcm_vhost_get_fabric_wwn(struct se_portal_group *se_tpg)
{
	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
				struct tcm_vhost_tpg, se_tpg);
	struct tcm_vhost_tport *tport = tpg->tport;

	return &tport->tport_name[0];
}

static u16 tcm_vhost_get_tag(struct se_portal_group *se_tpg)
{
	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
				struct tcm_vhost_tpg, se_tpg);
	return tpg->tport_tpgt;
}

static u32 tcm_vhost_get_default_depth(struct se_portal_group *se_tpg)
{
	return 1;
}

static u32
tcm_vhost_get_pr_transport_id(struct se_portal_group *se_tpg,
			      struct se_node_acl *se_nacl,
			      struct t10_pr_registration *pr_reg,
			      int *format_code,
			      unsigned char *buf)
{
	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
				struct tcm_vhost_tpg, se_tpg);
	struct tcm_vhost_tport *tport = tpg->tport;

	switch (tport->tport_proto_id) {
	case SCSI_PROTOCOL_SAS:
		return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
					format_code, buf);
	case SCSI_PROTOCOL_FCP:
		return fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
					format_code, buf);
	case SCSI_PROTOCOL_ISCSI:
		return iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
					format_code, buf);
	default:
		pr_err("Unknown tport_proto_id: 0x%02x, using"
			" SAS emulation\n", tport->tport_proto_id);
		break;
	}

	return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
			format_code, buf);
}

static u32
tcm_vhost_get_pr_transport_id_len(struct se_portal_group *se_tpg,
				  struct se_node_acl *se_nacl,
				  struct t10_pr_registration *pr_reg,
				  int *format_code)
{
	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
				struct tcm_vhost_tpg, se_tpg);
	struct tcm_vhost_tport *tport = tpg->tport;

	switch (tport->tport_proto_id) {
	case SCSI_PROTOCOL_SAS:
		return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
					format_code);
	case SCSI_PROTOCOL_FCP:
		return fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
					format_code);
	case SCSI_PROTOCOL_ISCSI:
		return iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
					format_code);
	default:
		pr_err("Unknown tport_proto_id: 0x%02x, using"
			" SAS emulation\n", tport->tport_proto_id);
		break;
	}

	return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
			format_code);
}

static char *
tcm_vhost_parse_pr_out_transport_id(struct se_portal_group *se_tpg,
				    const char *buf,
				    u32 *out_tid_len,
				    char **port_nexus_ptr)
{
	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
				struct tcm_vhost_tpg, se_tpg);
	struct tcm_vhost_tport *tport = tpg->tport;

	switch (tport->tport_proto_id) {
	case SCSI_PROTOCOL_SAS:
		return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
					port_nexus_ptr);
	case SCSI_PROTOCOL_FCP:
		return fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
					port_nexus_ptr);
	case SCSI_PROTOCOL_ISCSI:
		return iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
					port_nexus_ptr);
	default:
		pr_err("Unknown tport_proto_id: 0x%02x, using"
			" SAS emulation\n", tport->tport_proto_id);
		break;
	}

	return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
			port_nexus_ptr);
}

static struct se_node_acl *
tcm_vhost_alloc_fabric_acl(struct se_portal_group *se_tpg)
{
	struct tcm_vhost_nacl *nacl;

	nacl = kzalloc(sizeof(struct tcm_vhost_nacl), GFP_KERNEL);
	if (!nacl) {
		pr_err("Unable to allocate struct tcm_vhost_nacl\n");
		return NULL;
	}

	return &nacl->se_node_acl;
}

static void
tcm_vhost_release_fabric_acl(struct se_portal_group *se_tpg,
			     struct se_node_acl *se_nacl)
{
	struct tcm_vhost_nacl *nacl = container_of(se_nacl,
			struct tcm_vhost_nacl, se_node_acl);
	kfree(nacl);
}

static u32 tcm_vhost_tpg_get_inst_index(struct se_portal_group *se_tpg)
{
	return 1;
}

static void tcm_vhost_release_cmd(struct se_cmd *se_cmd)
{
	struct tcm_vhost_cmd *tv_cmd = container_of(se_cmd,
				struct tcm_vhost_cmd, tvc_se_cmd);
	struct se_session *se_sess = se_cmd->se_sess;

	if (tv_cmd->tvc_sgl_count) {
		u32 i;
		for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
			put_page(sg_page(&tv_cmd->tvc_sgl[i]));
	}

	tcm_vhost_put_inflight(tv_cmd->inflight);
	percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
}

static int tcm_vhost_shutdown_session(struct se_session *se_sess)
{
	return 0;
}

static void tcm_vhost_close_session(struct se_session *se_sess)
{
	return;
}

static u32 tcm_vhost_sess_get_index(struct se_session *se_sess)
{
	return 0;
}

static int tcm_vhost_write_pending(struct se_cmd *se_cmd)
{
	/* Go ahead and process the write immediately */
	target_execute_cmd(se_cmd);
	return 0;
}

static int tcm_vhost_write_pending_status(struct se_cmd *se_cmd)
{
	return 0;
}

static void tcm_vhost_set_default_node_attrs(struct se_node_acl *nacl)
{
	return;
}

static u32 tcm_vhost_get_task_tag(struct se_cmd *se_cmd)
{
	return 0;
}

static int tcm_vhost_get_cmd_state(struct se_cmd *se_cmd)
{
	return 0;
}
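
/*
 * Queue a finished command on the per-device completion llist and kick
 * the vhost worker; the response itself is copied back to the guest in
 * vhost_scsi_complete_cmd_work().
 */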
static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *cmd)
{
	struct vhost_scsi *vs = cmd->tvc_vhost;

	llist_add(&cmd->tvc_completion_list, &vs->vs_completion_list);

	vhost_work_queue(&vs->dev, &vs->vs_completion_work);
}

static int tcm_vhost_queue_data_in(struct se_cmd *se_cmd)
{
	struct tcm_vhost_cmd *cmd = container_of(se_cmd,
				struct tcm_vhost_cmd, tvc_se_cmd);
	vhost_scsi_complete_cmd(cmd);
	return 0;
}

static int tcm_vhost_queue_status(struct se_cmd *se_cmd)
{
	struct tcm_vhost_cmd *cmd = container_of(se_cmd,
				struct tcm_vhost_cmd, tvc_se_cmd);
	vhost_scsi_complete_cmd(cmd);
	return 0;
}

static void tcm_vhost_queue_tm_rsp(struct se_cmd *se_cmd)
{
	return;
}
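
/*
 * Hotplug events are counted in vs_events_nr and capped at
 * VHOST_SCSI_MAX_EVENT; past the cap (or on allocation failure) the
 * event is dropped and vs_events_missed is set, so the guest later
 * sees VIRTIO_SCSI_T_EVENTS_MISSED.
 */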
static void tcm_vhost_free_evt(struct vhost_scsi *vs, struct tcm_vhost_evt *evt)
{
	vs->vs_events_nr--;
	kfree(evt);
}

static struct tcm_vhost_evt *
tcm_vhost_allocate_evt(struct vhost_scsi *vs,
		       u32 event, u32 reason)
{
	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
	struct tcm_vhost_evt *evt;

	if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
		vs->vs_events_missed = true;
		return NULL;
	}

	evt = kzalloc(sizeof(*evt), GFP_KERNEL);
	if (!evt) {
		vq_err(vq, "Failed to allocate tcm_vhost_evt\n");
		vs->vs_events_missed = true;
		return NULL;
	}

	evt->event.event = event;
	evt->event.reason = reason;
	vs->vs_events_nr++;

	return evt;
}

static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *cmd)
{
	struct se_cmd *se_cmd = &cmd->tvc_se_cmd;

	/* TODO locking against target/backend threads? */
	transport_generic_free_cmd(se_cmd, 0);
}

static int vhost_scsi_check_stop_free(struct se_cmd *se_cmd)
{
	return target_put_sess_cmd(se_cmd->se_sess, se_cmd);
}

static void
tcm_vhost_do_evt_work(struct vhost_scsi *vs, struct tcm_vhost_evt *evt)
{
	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
	struct virtio_scsi_event *event = &evt->event;
	struct virtio_scsi_event __user *eventp;
	unsigned out, in;
	int head, ret;

	if (!vq->private_data) {
		vs->vs_events_missed = true;
		return;
	}

again:
	vhost_disable_notify(&vs->dev, vq);
	head = vhost_get_vq_desc(&vs->dev, vq, vq->iov,
			ARRAY_SIZE(vq->iov), &out, &in,
			NULL, NULL);
	if (head < 0) {
		vs->vs_events_missed = true;
		return;
	}
	if (head == vq->num) {
		if (vhost_enable_notify(&vs->dev, vq))
			goto again;
		vs->vs_events_missed = true;
		return;
	}

	if ((vq->iov[out].iov_len != sizeof(struct virtio_scsi_event))) {
		vq_err(vq, "Expecting virtio_scsi_event, got %zu bytes\n",
				vq->iov[out].iov_len);
		vs->vs_events_missed = true;
		return;
	}

	if (vs->vs_events_missed) {
		event->event |= VIRTIO_SCSI_T_EVENTS_MISSED;
		vs->vs_events_missed = false;
	}

	eventp = vq->iov[out].iov_base;
	ret = __copy_to_user(eventp, event, sizeof(*event));
	if (!ret)
		vhost_add_used_and_signal(&vs->dev, vq, head, 0);
	else
		vq_err(vq, "Faulted on tcm_vhost_send_event\n");
}

static void tcm_vhost_evt_work(struct vhost_work *work)
{
	struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
					vs_event_work);
	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
	struct tcm_vhost_evt *evt;
	struct llist_node *llnode;

	mutex_lock(&vq->mutex);
	llnode = llist_del_all(&vs->vs_event_list);
	while (llnode) {
		evt = llist_entry(llnode, struct tcm_vhost_evt, list);
		llnode = llist_next(llnode);
		tcm_vhost_do_evt_work(vs, evt);
		tcm_vhost_free_evt(vs, evt);
	}
	mutex_unlock(&vq->mutex);
}

/* Fill in status and signal that we are done processing this command
 *
 * This is scheduled in the vhost work queue so we are called with the owner
 * process mm and can access the vring.
 */
static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
{
	struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
					vs_completion_work);
	DECLARE_BITMAP(signal, VHOST_SCSI_MAX_VQ);
	struct virtio_scsi_cmd_resp v_rsp;
	struct tcm_vhost_cmd *cmd;
	struct llist_node *llnode;
	struct se_cmd *se_cmd;
	int ret, vq;

	bitmap_zero(signal, VHOST_SCSI_MAX_VQ);
	llnode = llist_del_all(&vs->vs_completion_list);
	while (llnode) {
		cmd = llist_entry(llnode, struct tcm_vhost_cmd,
				     tvc_completion_list);
		llnode = llist_next(llnode);
		se_cmd = &cmd->tvc_se_cmd;

		pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__,
			cmd, se_cmd->residual_count, se_cmd->scsi_status);

		memset(&v_rsp, 0, sizeof(v_rsp));
		v_rsp.resid = se_cmd->residual_count;
		/* TODO is status_qualifier field needed? */
		v_rsp.status = se_cmd->scsi_status;
		v_rsp.sense_len = se_cmd->scsi_sense_length;
		memcpy(v_rsp.sense, cmd->tvc_sense_buf,
		       v_rsp.sense_len);
		ret = copy_to_user(cmd->tvc_resp, &v_rsp, sizeof(v_rsp));
		if (likely(ret == 0)) {
			struct vhost_scsi_virtqueue *q;
			vhost_add_used(cmd->tvc_vq, cmd->tvc_vq_desc, 0);
			q = container_of(cmd->tvc_vq, struct vhost_scsi_virtqueue, vq);
			vq = q - vs->vqs;
			__set_bit(vq, signal);
		} else
			pr_err("Faulted on virtio_scsi_cmd_resp\n");

		vhost_scsi_free_cmd(cmd);
	}

	vq = -1;
	while ((vq = find_next_bit(signal, VHOST_SCSI_MAX_VQ, vq + 1))
		< VHOST_SCSI_MAX_VQ)
		vhost_signal(&vs->dev, &vs->vqs[vq].vq);
}
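
/*
 * Allocate a tcm_vhost_cmd descriptor from the session's preallocated
 * tag pool. The per-command SGL and user-page arrays are saved across
 * the memset() and restored, so they are reused from command to
 * command instead of being reallocated.
 */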
static struct tcm_vhost_cmd *
vhost_scsi_get_tag(struct vhost_virtqueue *vq,
		   struct tcm_vhost_tpg *tpg,
		   struct virtio_scsi_cmd_req *v_req,
		   u32 exp_data_len,
		   int data_direction)
{
	struct tcm_vhost_cmd *cmd;
	struct tcm_vhost_nexus *tv_nexus;
	struct se_session *se_sess;
	struct scatterlist *sg;
	struct page **pages;
	int tag;

	tv_nexus = tpg->tpg_nexus;
	if (!tv_nexus) {
		pr_err("Unable to locate active struct tcm_vhost_nexus\n");
		return ERR_PTR(-EIO);
	}
	se_sess = tv_nexus->tvn_se_sess;

	tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_KERNEL);
	cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[tag];
	sg = cmd->tvc_sgl;
	pages = cmd->tvc_upages;

	memset(cmd, 0, sizeof(struct tcm_vhost_cmd));
	cmd->tvc_sgl = sg;
	cmd->tvc_upages = pages;
	cmd->tvc_se_cmd.map_tag = tag;
	cmd->tvc_tag = v_req->tag;
	cmd->tvc_task_attr = v_req->task_attr;
	cmd->tvc_exp_data_len = exp_data_len;
	cmd->tvc_data_direction = data_direction;
	cmd->tvc_nexus = tv_nexus;
	cmd->inflight = tcm_vhost_get_inflight(vq);

	return cmd;
}

/*
 * Map a user memory range into a scatterlist
 *
 * Returns the number of scatterlist entries used or -errno on error.
 */
static int
vhost_scsi_map_to_sgl(struct tcm_vhost_cmd *tv_cmd,
		      struct scatterlist *sgl,
		      unsigned int sgl_count,
		      struct iovec *iov,
		      int write)
{
	unsigned int npages = 0, pages_nr, offset, nbytes;
	struct scatterlist *sg = sgl;
	void __user *ptr = iov->iov_base;
	size_t len = iov->iov_len;
	struct page **pages;
	int ret, i;

	if (sgl_count > TCM_VHOST_PREALLOC_SGLS) {
		pr_err("vhost_scsi_map_to_sgl() sgl_count: %u greater than"
		       " preallocated TCM_VHOST_PREALLOC_SGLS: %u\n",
			sgl_count, TCM_VHOST_PREALLOC_SGLS);
		return -ENOBUFS;
	}

	pages_nr = iov_num_pages(iov);
	if (pages_nr > sgl_count)
		return -ENOBUFS;

	if (pages_nr > TCM_VHOST_PREALLOC_PAGES) {
		pr_err("vhost_scsi_map_to_sgl() pages_nr: %u greater than"
		       " preallocated TCM_VHOST_PREALLOC_PAGES: %u\n",
			pages_nr, TCM_VHOST_PREALLOC_PAGES);
		return -ENOBUFS;
	}
	pages = tv_cmd->tvc_upages;

	ret = get_user_pages_fast((unsigned long)ptr, pages_nr, write, pages);
	/* No pages were pinned */
	if (ret < 0)
		goto out;
	/* Fewer pages pinned than wanted */
	if (ret != pages_nr) {
		for (i = 0; i < ret; i++)
			put_page(pages[i]);
		ret = -EFAULT;
		goto out;
	}

	while (len > 0) {
		offset = (uintptr_t)ptr & ~PAGE_MASK;
		nbytes = min_t(unsigned int, PAGE_SIZE - offset, len);
		sg_set_page(sg, pages[npages], nbytes, offset);
		ptr += nbytes;
		len -= nbytes;
		sg++;
		npages++;
	}

out:
	return ret;
}
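
/*
 * Build the command's scatterlist from the data iovecs: the SGL is
 * sized with iov_num_pages() first, since each iovec may span several
 * pages, then each iovec is pinned and mapped in turn.
 */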
static int
vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *cmd,
			  struct iovec *iov,
			  unsigned int niov,
			  int write)
{
	int ret;
	unsigned int i;
	u32 sgl_count;
	struct scatterlist *sg;

	/*
	 * Find out how long sglist needs to be
	 */
	sgl_count = 0;
	for (i = 0; i < niov; i++)
		sgl_count += iov_num_pages(&iov[i]);

	/* TODO overflow checking */

	sg = cmd->tvc_sgl;
	pr_debug("%s sg %p sgl_count %u\n", __func__, sg, sgl_count);
	sg_init_table(sg, sgl_count);

	cmd->tvc_sgl_count = sgl_count;

	pr_debug("Mapping %u iovecs for %u pages\n", niov, sgl_count);

	for (i = 0; i < niov; i++) {
		ret = vhost_scsi_map_to_sgl(cmd, sg, sgl_count, &iov[i],
					    write);
		if (ret < 0) {
			for (i = 0; i < cmd->tvc_sgl_count; i++)
				put_page(sg_page(&cmd->tvc_sgl[i]));

			cmd->tvc_sgl_count = 0;
			return ret;
		}
		sg += ret;
		sgl_count -= ret;
	}
	return 0;
}
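
/*
 * Runs in tcm_vhost_workqueue process context and hands the command to
 * the target core via target_submit_cmd_map_sgls(); on submission
 * failure a CHECK_CONDITION response is generated instead.
 */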
static void tcm_vhost_submission_work(struct work_struct *work)
{
	struct tcm_vhost_cmd *cmd =
		container_of(work, struct tcm_vhost_cmd, work);
	struct tcm_vhost_nexus *tv_nexus;
	struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
	struct scatterlist *sg_ptr, *sg_bidi_ptr = NULL;
	int rc, sg_no_bidi = 0;

	if (cmd->tvc_sgl_count) {
		sg_ptr = cmd->tvc_sgl;
/* FIXME: Fix BIDI operation in tcm_vhost_submission_work() */
#if 0
		if (se_cmd->se_cmd_flags & SCF_BIDI) {
			sg_bidi_ptr = NULL;
			sg_no_bidi = 0;
		}
#endif
	} else {
		sg_ptr = NULL;
	}
	tv_nexus = cmd->tvc_nexus;

	rc = target_submit_cmd_map_sgls(se_cmd, tv_nexus->tvn_se_sess,
			cmd->tvc_cdb, &cmd->tvc_sense_buf[0],
			cmd->tvc_lun, cmd->tvc_exp_data_len,
			cmd->tvc_task_attr, cmd->tvc_data_direction,
			TARGET_SCF_ACK_KREF, sg_ptr, cmd->tvc_sgl_count,
			sg_bidi_ptr, sg_no_bidi);
	if (rc < 0) {
		transport_send_check_condition_and_sense(se_cmd,
				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
		transport_generic_free_cmd(se_cmd, 0);
	}
}
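
/*
 * Reply to a request addressed to a non-existent target with
 * VIRTIO_SCSI_S_BAD_TARGET so the guest sees a clean error rather than
 * a stalled ring.
 */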
static void
vhost_scsi_send_bad_target(struct vhost_scsi *vs,
			   struct vhost_virtqueue *vq,
			   int head, unsigned out)
{
	struct virtio_scsi_cmd_resp __user *resp;
	struct virtio_scsi_cmd_resp rsp;
	int ret;

	memset(&rsp, 0, sizeof(rsp));
	rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
	resp = vq->iov[out].iov_base;
	ret = __copy_to_user(resp, &rsp, sizeof(rsp));
	if (!ret)
		vhost_add_used_and_signal(&vs->dev, vq, head, 0);
	else
		pr_err("Faulted on virtio_scsi_cmd_resp\n");
}
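
/*
 * Per the virtio-scsi spec, the request's 8-byte LUN field encodes a
 * (1, target, lun) address: lun[0] is 0x01, lun[1] is the target
 * number, and lun[2..3] carry the LUN. vhost_scsi_handle_vq() decodes
 * it accordingly below.
 */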
static void
vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
{
	struct tcm_vhost_tpg **vs_tpg;
	struct virtio_scsi_cmd_req v_req;
	struct tcm_vhost_tpg *tpg;
	struct tcm_vhost_cmd *cmd;
	u32 exp_data_len, data_first, data_num, data_direction;
	unsigned out, in, i;
	int head, ret;
	u8 target;

	mutex_lock(&vq->mutex);
	/*
	 * We can handle the vq only after the endpoint is setup by calling the
	 * VHOST_SCSI_SET_ENDPOINT ioctl.
	 */
	vs_tpg = vq->private_data;
	if (!vs_tpg)
		goto out;

	vhost_disable_notify(&vs->dev, vq);

	for (;;) {
		head = vhost_get_vq_desc(&vs->dev, vq, vq->iov,
					ARRAY_SIZE(vq->iov), &out, &in,
					NULL, NULL);
		pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
					head, out, in);
		/* On error, stop handling until the next kick. */
		if (unlikely(head < 0))
			break;
		/* Nothing new?  Wait for eventfd to tell us they refilled. */
		if (head == vq->num) {
			if (unlikely(vhost_enable_notify(&vs->dev, vq))) {
				vhost_disable_notify(&vs->dev, vq);
				continue;
			}
			break;
		}

		/* FIXME: BIDI operation */
		if (out == 1 && in == 1) {
			data_direction = DMA_NONE;
			data_first = 0;
			data_num = 0;
		} else if (out == 1 && in > 1) {
			data_direction = DMA_FROM_DEVICE;
			data_first = out + 1;
			data_num = in - 1;
		} else if (out > 1 && in == 1) {
			data_direction = DMA_TO_DEVICE;
			data_first = 1;
			data_num = out - 1;
		} else {
			vq_err(vq, "Invalid buffer layout out: %u in: %u\n",
					out, in);
			break;
		}

		/*
		 * Check for a sane resp buffer so we can report errors to
		 * the guest.
		 */
		if (unlikely(vq->iov[out].iov_len !=
					sizeof(struct virtio_scsi_cmd_resp))) {
			vq_err(vq, "Expecting virtio_scsi_cmd_resp, got %zu"
				" bytes\n", vq->iov[out].iov_len);
			break;
		}

		if (unlikely(vq->iov[0].iov_len != sizeof(v_req))) {
			vq_err(vq, "Expecting virtio_scsi_cmd_req, got %zu"
				" bytes\n", vq->iov[0].iov_len);
			break;
		}
		pr_debug("Calling __copy_from_user: vq->iov[0].iov_base: %p,"
			" len: %zu\n", vq->iov[0].iov_base, sizeof(v_req));
		ret = __copy_from_user(&v_req, vq->iov[0].iov_base,
				sizeof(v_req));
		if (unlikely(ret)) {
			vq_err(vq, "Faulted on virtio_scsi_cmd_req\n");
			break;
		}

		/* Extract the tpgt */
		target = v_req.lun[1];
		tpg = ACCESS_ONCE(vs_tpg[target]);

		/* Target does not exist, fail the request */
		if (unlikely(!tpg)) {
			vhost_scsi_send_bad_target(vs, vq, head, out);
			continue;
		}

		exp_data_len = 0;
		for (i = 0; i < data_num; i++)
			exp_data_len += vq->iov[data_first + i].iov_len;

		cmd = vhost_scsi_get_tag(vq, tpg, &v_req,
					 exp_data_len, data_direction);
		if (IS_ERR(cmd)) {
			vq_err(vq, "vhost_scsi_get_tag failed %ld\n",
					PTR_ERR(cmd));
			goto err_cmd;
		}
		pr_debug("Allocated tv_cmd: %p exp_data_len: %d, data_direction"
			": %d\n", cmd, exp_data_len, data_direction);

		cmd->tvc_vhost = vs;
		cmd->tvc_vq = vq;
		cmd->tvc_resp = vq->iov[out].iov_base;

		/*
		 * Copy in the received CDB descriptor into cmd->tvc_cdb
		 * that will be used by tcm_vhost_new_cmd_map() and down into
		 * target_setup_cmd_from_cdb()
		 */
		memcpy(cmd->tvc_cdb, v_req.cdb, TCM_VHOST_MAX_CDB_SIZE);
		/*
		 * Check that the received CDB size does not exceed our
		 * hardcoded max for tcm_vhost
		 */
		/* TODO what if cdb was too small for varlen cdb header? */
		if (unlikely(scsi_command_size(cmd->tvc_cdb) >
					TCM_VHOST_MAX_CDB_SIZE)) {
			vq_err(vq, "Received SCSI CDB with command_size: %d that"
				" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
				scsi_command_size(cmd->tvc_cdb),
				TCM_VHOST_MAX_CDB_SIZE);
			goto err_free;
		}
		cmd->tvc_lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF;

		pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
			cmd->tvc_cdb[0], cmd->tvc_lun);

		if (data_direction != DMA_NONE) {
			ret = vhost_scsi_map_iov_to_sgl(cmd,
					&vq->iov[data_first], data_num,
					data_direction == DMA_TO_DEVICE);
			if (unlikely(ret)) {
				vq_err(vq, "Failed to map iov to sgl\n");
				goto err_free;
			}
		}

		/*
		 * Save the descriptor from vhost_get_vq_desc() to be used to
		 * complete the virtio-scsi request in TCM callback context via
		 * tcm_vhost_queue_data_in() and tcm_vhost_queue_status()
		 */
		cmd->tvc_vq_desc = head;
		/*
		 * Dispatch tv_cmd descriptor for cmwq execution in process
		 * context provided by tcm_vhost_workqueue.  This also ensures
		 * tv_cmd is executed on the same kworker CPU as this vhost
		 * thread to gain positive L2 cache locality effects.
		 */
		INIT_WORK(&cmd->work, tcm_vhost_submission_work);
		queue_work(tcm_vhost_workqueue, &cmd->work);
	}

	mutex_unlock(&vq->mutex);
	return;

err_free:
	vhost_scsi_free_cmd(cmd);
err_cmd:
	vhost_scsi_send_bad_target(vs, vq, head, out);
out:
	mutex_unlock(&vq->mutex);
}

static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
{
	pr_debug("%s: The handling func for control queue.\n", __func__);
}

static void
tcm_vhost_send_evt(struct vhost_scsi *vs,
		   struct tcm_vhost_tpg *tpg,
		   struct se_lun *lun,
		   u32 event,
		   u32 reason)
{
	struct tcm_vhost_evt *evt;

	evt = tcm_vhost_allocate_evt(vs, event, reason);
	if (!evt)
		return;

	if (tpg && lun) {
		/* TODO: share lun setup code with virtio-scsi.ko */
		/*
		 * Note: evt->event is zeroed when we allocate it and
		 * lun[4-7] need to be zero according to virtio-scsi spec.
		 */
		evt->event.lun[0] = 0x01;
		evt->event.lun[1] = tpg->tport_tpgt & 0xFF;
		if (lun->unpacked_lun >= 256)
			evt->event.lun[2] = lun->unpacked_lun >> 8 | 0x40;
		evt->event.lun[3] = lun->unpacked_lun & 0xFF;
	}

	llist_add(&evt->list, &vs->vs_event_list);
	vhost_work_queue(&vs->dev, &vs->vs_event_work);
}

static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						poll.work);
	struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);

	mutex_lock(&vq->mutex);
	if (!vq->private_data)
		goto out;

	if (vs->vs_events_missed)
		tcm_vhost_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
out:
	mutex_unlock(&vq->mutex);
}

static void vhost_scsi_handle_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						poll.work);
	struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);

	vhost_scsi_handle_vq(vs, vq);
}

static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index)
{
	vhost_poll_flush(&vs->vqs[index].vq.poll);
}

/* Callers must hold dev mutex */
static void vhost_scsi_flush(struct vhost_scsi *vs)
{
	struct vhost_scsi_inflight *old_inflight[VHOST_SCSI_MAX_VQ];
	int i;

	/* Init new inflight and remember the old inflight */
	tcm_vhost_init_inflight(vs, old_inflight);

	/*
	 * The inflight->kref was initialized to 1. We decrement it here to
	 * indicate the start of the flush operation so that it will reach 0
	 * when all the reqs are finished.
	 */
	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
		kref_put(&old_inflight[i]->kref, tcm_vhost_done_inflight);

	/* Flush both the vhost poll and vhost work */
	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
		vhost_scsi_flush_vq(vs, i);
	vhost_work_flush(&vs->dev, &vs->vs_completion_work);
	vhost_work_flush(&vs->dev, &vs->vs_event_work);

	/* Wait for all reqs issued before the flush to be finished */
	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
		wait_for_completion(&old_inflight[i]->comp);
}

/*
 * Called from vhost_scsi_ioctl() context to walk the list of available
 * tcm_vhost_tpg with an active struct tcm_vhost_nexus
 *
 *  The lock nesting rule is:
 *    tcm_vhost_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex
 */
static int
vhost_scsi_set_endpoint(struct vhost_scsi *vs,
			struct vhost_scsi_target *t)
{
	struct tcm_vhost_tport *tv_tport;
	struct tcm_vhost_tpg *tpg;
	struct tcm_vhost_tpg **vs_tpg;
	struct vhost_virtqueue *vq;
	int index, ret, i, len;
	bool match = false;

	mutex_lock(&tcm_vhost_mutex);
	mutex_lock(&vs->dev.mutex);

	/* Verify that ring has been setup correctly. */
	for (index = 0; index < vs->dev.nvqs; ++index) {
		if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
			ret = -EFAULT;
			goto out;
		}
	}

	len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET;
	vs_tpg = kzalloc(len, GFP_KERNEL);
	if (!vs_tpg) {
		ret = -ENOMEM;
		goto out;
	}
	if (vs->vs_tpg)
		memcpy(vs_tpg, vs->vs_tpg, len);

	list_for_each_entry(tpg, &tcm_vhost_list, tv_tpg_list) {
		mutex_lock(&tpg->tv_tpg_mutex);
		if (!tpg->tpg_nexus) {
			mutex_unlock(&tpg->tv_tpg_mutex);
			continue;
		}
		if (tpg->tv_tpg_vhost_count != 0) {
			mutex_unlock(&tpg->tv_tpg_mutex);
			continue;
		}
		tv_tport = tpg->tport;

		if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
			if (vs->vs_tpg && vs->vs_tpg[tpg->tport_tpgt]) {
				kfree(vs_tpg);
				mutex_unlock(&tpg->tv_tpg_mutex);
				ret = -EEXIST;
				goto out;
			}
			tpg->tv_tpg_vhost_count++;
			tpg->vhost_scsi = vs;
			vs_tpg[tpg->tport_tpgt] = tpg;
			smp_mb__after_atomic_inc();
			match = true;
		}
		mutex_unlock(&tpg->tv_tpg_mutex);
	}

	if (match) {
		memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
		       sizeof(vs->vs_vhost_wwpn));
		for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
			vq = &vs->vqs[i].vq;
			mutex_lock(&vq->mutex);
			vq->private_data = vs_tpg;
			vhost_init_used(vq);
			mutex_unlock(&vq->mutex);
		}
		ret = 0;
	} else {
		ret = -EEXIST;
	}

	/*
	 * Act as synchronize_rcu to make sure access to
	 * old vs->vs_tpg is finished.
	 */
	vhost_scsi_flush(vs);
	kfree(vs->vs_tpg);
	vs->vs_tpg = vs_tpg;

out:
	mutex_unlock(&vs->dev.mutex);
	mutex_unlock(&tcm_vhost_mutex);
	return ret;
}

static int
vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
			  struct vhost_scsi_target *t)
{
	struct tcm_vhost_tport *tv_tport;
	struct tcm_vhost_tpg *tpg;
	struct vhost_virtqueue *vq;
	bool match = false;
	int index, ret, i;
	u8 target;

	mutex_lock(&tcm_vhost_mutex);
	mutex_lock(&vs->dev.mutex);
	/* Verify that ring has been setup correctly. */
	for (index = 0; index < vs->dev.nvqs; ++index) {
		if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
			ret = -EFAULT;
			goto err_dev;
		}
	}

	if (!vs->vs_tpg) {
		ret = 0;
		goto err_dev;
	}

	for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
		target = i;
		tpg = vs->vs_tpg[target];
		if (!tpg)
			continue;

		mutex_lock(&tpg->tv_tpg_mutex);
		tv_tport = tpg->tport;
		if (!tv_tport) {
			ret = -ENODEV;
			goto err_tpg;
		}

		if (strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
			pr_warn("tv_tport->tport_name: %s, tpg->tport_tpgt: %hu"
				" does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n",
				tv_tport->tport_name, tpg->tport_tpgt,
				t->vhost_wwpn, t->vhost_tpgt);
			ret = -EINVAL;
			goto err_tpg;
		}
		tpg->tv_tpg_vhost_count--;
		tpg->vhost_scsi = NULL;
		vs->vs_tpg[target] = NULL;
		match = true;
		mutex_unlock(&tpg->tv_tpg_mutex);
	}
	if (match) {
		for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
			vq = &vs->vqs[i].vq;
			mutex_lock(&vq->mutex);
			vq->private_data = NULL;
			mutex_unlock(&vq->mutex);
		}
	}
	/*
	 * Act as synchronize_rcu to make sure access to
	 * old vs->vs_tpg is finished.
	 */
	vhost_scsi_flush(vs);
	kfree(vs->vs_tpg);
	vs->vs_tpg = NULL;
	WARN_ON(vs->vs_events_nr);
	mutex_unlock(&vs->dev.mutex);
	mutex_unlock(&tcm_vhost_mutex);
	return 0;

err_tpg:
	mutex_unlock(&tpg->tv_tpg_mutex);
err_dev:
	mutex_unlock(&vs->dev.mutex);
	mutex_unlock(&tcm_vhost_mutex);
	return ret;
}

static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
{
	if (features & ~VHOST_SCSI_FEATURES)
		return -EOPNOTSUPP;

	mutex_lock(&vs->dev.mutex);
	if ((features & (1 << VHOST_F_LOG_ALL)) &&
	    !vhost_log_access_ok(&vs->dev)) {
		mutex_unlock(&vs->dev.mutex);
		return -EFAULT;
	}
	vs->dev.acked_features = features;
	smp_wmb();
	vhost_scsi_flush(vs);
	mutex_unlock(&vs->dev.mutex);
	return 0;
}

static void vhost_scsi_free(struct vhost_scsi *vs)
{
	if (is_vmalloc_addr(vs))
		vfree(vs);
	else
		kfree(vs);
}
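
/*
 * struct vhost_scsi is large because it embeds VHOST_SCSI_MAX_VQ
 * virtqueues, so allocation first tries kzalloc() with __GFP_NOWARN and
 * falls back to vzalloc(); vhost_scsi_free() above picks the matching
 * release path via is_vmalloc_addr().
 */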
2012-07-19 01:31:32 +04:00
static int vhost_scsi_open(struct inode *inode, struct file *f)
{
	struct vhost_scsi *vs;
	struct vhost_virtqueue **vqs;
	int r = -ENOMEM, i;

	vs = kzalloc(sizeof(*vs), GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
	if (!vs) {
		vs = vzalloc(sizeof(*vs));
		if (!vs)
			goto err_vs;
	}

	vqs = kmalloc(VHOST_SCSI_MAX_VQ * sizeof(*vqs), GFP_KERNEL);
	if (!vqs)
		goto err_vqs;

	vhost_work_init(&vs->vs_completion_work, vhost_scsi_complete_cmd_work);
	vhost_work_init(&vs->vs_event_work, tcm_vhost_evt_work);

	vs->vs_events_nr = 0;
	vs->vs_events_missed = false;

	vqs[VHOST_SCSI_VQ_CTL] = &vs->vqs[VHOST_SCSI_VQ_CTL].vq;
	vqs[VHOST_SCSI_VQ_EVT] = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
	vs->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick;
	vs->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick;
	for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++) {
		vqs[i] = &vs->vqs[i].vq;
		vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
	}
	r = vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ);

	tcm_vhost_init_inflight(vs, NULL);

	if (r < 0)
		goto err_init;

	f->private_data = vs;
	return 0;

err_init:
	kfree(vqs);
err_vqs:
	vhost_scsi_free(vs);
err_vs:
	return r;
}

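/*
 * Release a vhost-scsi instance: detach the configured endpoint, stop
 * and clean up the vhost device, then free everything allocated in
 * vhost_scsi_open().
 */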
static int vhost_scsi_release(struct inode *inode, struct file *f)
{
	struct vhost_scsi *vs = f->private_data;
	struct vhost_scsi_target t;

	mutex_lock(&vs->dev.mutex);
	memcpy(t.vhost_wwpn, vs->vs_vhost_wwpn, sizeof(t.vhost_wwpn));
	mutex_unlock(&vs->dev.mutex);
	vhost_scsi_clear_endpoint(vs, &t);
	vhost_dev_stop(&vs->dev);
	vhost_dev_cleanup(&vs->dev, false);
	/* Jobs can re-queue themselves in evt kick handler. Do extra flush. */
	vhost_scsi_flush(vs);
	kfree(vs->dev.vqs);
	vhost_scsi_free(vs);
	return 0;
}

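/*
 * Main ioctl dispatcher for /dev/vhost-scsi.  Handles the vhost-scsi
 * specific endpoint, ABI version and events-missed ioctls here, and
 * falls back to the generic vhost device/vring ioctls for everything
 * else.
 */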
static long
vhost_scsi_ioctl(struct file *f,
		 unsigned int ioctl,
		 unsigned long arg)
{
	struct vhost_scsi *vs = f->private_data;
	struct vhost_scsi_target backend;
	void __user *argp = (void __user *)arg;
	u64 __user *featurep = argp;
	u32 __user *eventsp = argp;
	u32 events_missed;
	u64 features;
	int r, abi_version = VHOST_SCSI_ABI_VERSION;
	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;

	switch (ioctl) {
	case VHOST_SCSI_SET_ENDPOINT:
		if (copy_from_user(&backend, argp, sizeof backend))
			return -EFAULT;
		if (backend.reserved != 0)
			return -EOPNOTSUPP;
		return vhost_scsi_set_endpoint(vs, &backend);
	case VHOST_SCSI_CLEAR_ENDPOINT:
		if (copy_from_user(&backend, argp, sizeof backend))
			return -EFAULT;
		if (backend.reserved != 0)
			return -EOPNOTSUPP;
		return vhost_scsi_clear_endpoint(vs, &backend);
	case VHOST_SCSI_GET_ABI_VERSION:
		if (copy_to_user(argp, &abi_version, sizeof abi_version))
			return -EFAULT;
		return 0;
	case VHOST_SCSI_SET_EVENTS_MISSED:
		if (get_user(events_missed, eventsp))
			return -EFAULT;
		mutex_lock(&vq->mutex);
		vs->vs_events_missed = events_missed;
		mutex_unlock(&vq->mutex);
		return 0;
	case VHOST_SCSI_GET_EVENTS_MISSED:
		mutex_lock(&vq->mutex);
		events_missed = vs->vs_events_missed;
		mutex_unlock(&vq->mutex);
		if (put_user(events_missed, eventsp))
			return -EFAULT;
		return 0;
	case VHOST_GET_FEATURES:
		features = VHOST_SCSI_FEATURES;
		if (copy_to_user(featurep, &features, sizeof features))
			return -EFAULT;
		return 0;
	case VHOST_SET_FEATURES:
		if (copy_from_user(&features, featurep, sizeof features))
			return -EFAULT;
		return vhost_scsi_set_features(vs, features);
	default:
		mutex_lock(&vs->dev.mutex);
		r = vhost_dev_ioctl(&vs->dev, ioctl, argp);
		/* TODO: flush backend after dev ioctl. */
		if (r == -ENOIOCTLCMD)
			r = vhost_vring_ioctl(&vs->dev, ioctl, argp);
		mutex_unlock(&vs->dev.mutex);
		return r;
	}
}

#ifdef CONFIG_COMPAT
static long vhost_scsi_compat_ioctl(struct file *f, unsigned int ioctl,
				    unsigned long arg)
{
	return vhost_scsi_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
}
#endif

static const struct file_operations vhost_scsi_fops = {
	.owner = THIS_MODULE,
	.release = vhost_scsi_release,
	.unlocked_ioctl = vhost_scsi_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = vhost_scsi_compat_ioctl,
#endif
	.open = vhost_scsi_open,
	.llseek = noop_llseek,
};

static struct miscdevice vhost_scsi_misc = {
	MISC_DYNAMIC_MINOR,
	"vhost-scsi",
	&vhost_scsi_fops,
};

static int __init vhost_scsi_register(void)
{
	return misc_register(&vhost_scsi_misc);
}

static int vhost_scsi_deregister(void)
{
	return misc_deregister(&vhost_scsi_misc);
}

static char *tcm_vhost_dump_proto_id(struct tcm_vhost_tport *tport)
{
	switch (tport->tport_proto_id) {
	case SCSI_PROTOCOL_SAS:
		return "SAS";
	case SCSI_PROTOCOL_FCP:
		return "FCP";
	case SCSI_PROTOCOL_ISCSI:
		return "iSCSI";
	default:
		break;
	}

	return "Unknown";
}

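/*
 * Queue a VIRTIO_SCSI_T_TRANSPORT_RESET event for a LUN hotplug or hot
 * unplug.  This is a no-op unless the TPG is bound to a vhost instance
 * and the guest negotiated VIRTIO_SCSI_F_HOTPLUG.
 */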
static void
tcm_vhost_do_plug(struct tcm_vhost_tpg *tpg,
		  struct se_lun *lun, bool plug)
{
	struct vhost_scsi *vs = tpg->vhost_scsi;
	struct vhost_virtqueue *vq;
	u32 reason;

	if (!vs)
		return;

	mutex_lock(&vs->dev.mutex);
	if (!vhost_has_feature(&vs->dev, VIRTIO_SCSI_F_HOTPLUG)) {
		mutex_unlock(&vs->dev.mutex);
		return;
	}

	if (plug)
		reason = VIRTIO_SCSI_EVT_RESET_RESCAN;
	else
		reason = VIRTIO_SCSI_EVT_RESET_REMOVED;

	vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
	mutex_lock(&vq->mutex);
	tcm_vhost_send_evt(vs, tpg, lun,
			   VIRTIO_SCSI_T_TRANSPORT_RESET, reason);
	mutex_unlock(&vq->mutex);
	mutex_unlock(&vs->dev.mutex);
}

static void tcm_vhost_hotplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun)
{
	tcm_vhost_do_plug(tpg, lun, true);
}

static void tcm_vhost_hotunplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun)
{
	tcm_vhost_do_plug(tpg, lun, false);
}

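/*
 * configfs port link/unlink callbacks: track how many LUNs are exported
 * through this TPG and notify the guest of the topology change.
 */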
static int tcm_vhost_port_link(struct se_portal_group *se_tpg,
			       struct se_lun *lun)
{
	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
				struct tcm_vhost_tpg, se_tpg);

	mutex_lock(&tcm_vhost_mutex);

	mutex_lock(&tpg->tv_tpg_mutex);
	tpg->tv_tpg_port_count++;
	mutex_unlock(&tpg->tv_tpg_mutex);

	tcm_vhost_hotplug(tpg, lun);

	mutex_unlock(&tcm_vhost_mutex);

	return 0;
}

static void tcm_vhost_port_unlink(struct se_portal_group *se_tpg,
				  struct se_lun *lun)
{
	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
				struct tcm_vhost_tpg, se_tpg);

	mutex_lock(&tcm_vhost_mutex);

	mutex_lock(&tpg->tv_tpg_mutex);
	tpg->tv_tpg_port_count--;
	mutex_unlock(&tpg->tv_tpg_mutex);

	tcm_vhost_hotunplug(tpg, lun);

	mutex_unlock(&tcm_vhost_mutex);
}

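/*
 * Allocate a NodeACL for an explicit initiator WWPN.  WWN parsing is
 * currently stubbed out, so the ACL is registered with a zero WWPN and
 * a queue depth of one.
 */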
static struct se_node_acl *
tcm_vhost_make_nodeacl(struct se_portal_group *se_tpg,
		       struct config_group *group,
		       const char *name)
{
	struct se_node_acl *se_nacl, *se_nacl_new;
	struct tcm_vhost_nacl *nacl;
	u64 wwpn = 0;
	u32 nexus_depth;

	/* if (tcm_vhost_parse_wwn(name, &wwpn, 1) < 0)
		return ERR_PTR(-EINVAL); */
	se_nacl_new = tcm_vhost_alloc_fabric_acl(se_tpg);
	if (!se_nacl_new)
		return ERR_PTR(-ENOMEM);

	nexus_depth = 1;
	/*
	 * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
	 * when converting a NodeACL from demo mode -> explicit
	 */
	se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
				name, nexus_depth);
	if (IS_ERR(se_nacl)) {
		tcm_vhost_release_fabric_acl(se_tpg, se_nacl_new);
		return se_nacl;
	}
	/*
	 * Locate our struct tcm_vhost_nacl and set the FC Nport WWPN
	 */
	nacl = container_of(se_nacl, struct tcm_vhost_nacl, se_node_acl);
	nacl->iport_wwpn = wwpn;

	return se_nacl;
}

static void tcm_vhost_drop_nodeacl(struct se_node_acl *se_acl)
{
	struct tcm_vhost_nacl *nacl = container_of(se_acl,
				struct tcm_vhost_nacl, se_node_acl);
	core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);
	kfree(nacl);
}

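/*
 * Undo the per-command scatterlist and user-page array allocations made
 * in tcm_vhost_make_nexus() for every descriptor in the session tag
 * pool.
 */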
static void tcm_vhost_free_cmd_map_res(struct tcm_vhost_nexus *nexus,
				       struct se_session *se_sess)
{
	struct tcm_vhost_cmd *tv_cmd;
	unsigned int i;

	if (!se_sess->sess_cmd_map)
		return;

	for (i = 0; i < TCM_VHOST_DEFAULT_TAGS; i++) {
		tv_cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[i];

		kfree(tv_cmd->tvc_sgl);
		kfree(tv_cmd->tvc_upages);
	}
}

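/*
 * Set up the I_T nexus for a TPG: allocate the nexus, create a tagged
 * session whose command map holds TCM_VHOST_DEFAULT_TAGS descriptors,
 * preallocate the scatterlist and page arrays for each descriptor, and
 * register the session with the target core.
 */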
static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg,
				const char *name)
{
	struct se_portal_group *se_tpg;
	struct se_session *se_sess;
	struct tcm_vhost_nexus *tv_nexus;
	struct tcm_vhost_cmd *tv_cmd;
	unsigned int i;

	mutex_lock(&tpg->tv_tpg_mutex);
	if (tpg->tpg_nexus) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		pr_debug("tpg->tpg_nexus already exists\n");
		return -EEXIST;
	}
	se_tpg = &tpg->se_tpg;

	tv_nexus = kzalloc(sizeof(struct tcm_vhost_nexus), GFP_KERNEL);
	if (!tv_nexus) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		pr_err("Unable to allocate struct tcm_vhost_nexus\n");
		return -ENOMEM;
	}
	/*
	 * Initialize the struct se_session pointer and setup tagpool
	 * for struct tcm_vhost_cmd descriptors
	 */
	tv_nexus->tvn_se_sess = transport_init_session_tags(
					TCM_VHOST_DEFAULT_TAGS,
					sizeof(struct tcm_vhost_cmd));
	if (IS_ERR(tv_nexus->tvn_se_sess)) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		kfree(tv_nexus);
		return -ENOMEM;
	}
	se_sess = tv_nexus->tvn_se_sess;
	for (i = 0; i < TCM_VHOST_DEFAULT_TAGS; i++) {
		tv_cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[i];

		tv_cmd->tvc_sgl = kzalloc(sizeof(struct scatterlist) *
					TCM_VHOST_PREALLOC_SGLS, GFP_KERNEL);
		if (!tv_cmd->tvc_sgl) {
			mutex_unlock(&tpg->tv_tpg_mutex);
			pr_err("Unable to allocate tv_cmd->tvc_sgl\n");
			goto out;
		}

		tv_cmd->tvc_upages = kzalloc(sizeof(struct page *) *
					TCM_VHOST_PREALLOC_PAGES, GFP_KERNEL);
		if (!tv_cmd->tvc_upages) {
			mutex_unlock(&tpg->tv_tpg_mutex);
			pr_err("Unable to allocate tv_cmd->tvc_upages\n");
			goto out;
		}
	}
	/*
	 * Since we are running in 'demo mode' this call will generate a
	 * struct se_node_acl for the tcm_vhost struct se_portal_group with
	 * the SCSI Initiator port name of the passed configfs group 'name'.
	 */
	tv_nexus->tvn_se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
				se_tpg, (unsigned char *)name);
	if (!tv_nexus->tvn_se_sess->se_node_acl) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		pr_debug("core_tpg_check_initiator_node_acl() failed"
				" for %s\n", name);
		goto out;
	}
	/*
	 * Now register the TCM vhost virtual I_T Nexus as active with the
	 * call to __transport_register_session()
	 */
	__transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl,
			tv_nexus->tvn_se_sess, tv_nexus);
	tpg->tpg_nexus = tv_nexus;

	mutex_unlock(&tpg->tv_tpg_mutex);
	return 0;

out:
	tcm_vhost_free_cmd_map_res(tv_nexus, se_sess);
	transport_free_session(se_sess);
	kfree(tv_nexus);
	return -ENOMEM;
}

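/*
 * Tear down the I_T nexus for a TPG.  Refuse while any port is still
 * linked or any vhost endpoint still references the TPG, since active
 * I/O could otherwise race against the session teardown.
 */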
static int tcm_vhost_drop_nexus(struct tcm_vhost_tpg *tpg)
{
	struct se_session *se_sess;
	struct tcm_vhost_nexus *tv_nexus;

	mutex_lock(&tpg->tv_tpg_mutex);
	tv_nexus = tpg->tpg_nexus;
	if (!tv_nexus) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		return -ENODEV;
	}

	se_sess = tv_nexus->tvn_se_sess;
	if (!se_sess) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		return -ENODEV;
	}

	if (tpg->tv_tpg_port_count != 0) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		pr_err("Unable to remove TCM_vhost I_T Nexus with"
			" active TPG port count: %d\n",
			tpg->tv_tpg_port_count);
		return -EBUSY;
	}

	if (tpg->tv_tpg_vhost_count != 0) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		pr_err("Unable to remove TCM_vhost I_T Nexus with"
			" active TPG vhost count: %d\n",
			tpg->tv_tpg_vhost_count);
		return -EBUSY;
	}

	pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated"
		" %s Initiator Port: %s\n", tcm_vhost_dump_proto_id(tpg->tport),
		tv_nexus->tvn_se_sess->se_node_acl->initiatorname);

	tcm_vhost_free_cmd_map_res(tv_nexus, se_sess);
	/*
	 * Release the SCSI I_T Nexus to the emulated vhost Target Port
	 */
	transport_deregister_session(tv_nexus->tvn_se_sess);
	tpg->tpg_nexus = NULL;
	mutex_unlock(&tpg->tv_tpg_mutex);

	kfree(tv_nexus);
	return 0;
}

static ssize_t tcm_vhost_tpg_show_nexus(struct se_portal_group *se_tpg,
					char *page)
{
	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
				struct tcm_vhost_tpg, se_tpg);
	struct tcm_vhost_nexus *tv_nexus;
	ssize_t ret;

	mutex_lock(&tpg->tv_tpg_mutex);
	tv_nexus = tpg->tpg_nexus;
	if (!tv_nexus) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		return -ENODEV;
	}
	ret = snprintf(page, PAGE_SIZE, "%s\n",
			tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
	mutex_unlock(&tpg->tv_tpg_mutex);

	return ret;
}

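/*
 * Writing "NULL" to the configfs nexus attribute drops the active
 * nexus; any other value is validated against the tport's protocol
 * prefix ("naa.", "fc." or "iqn.") and used to create a new I_T nexus.
 */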
static ssize_t tcm_vhost_tpg_store_nexus(struct se_portal_group *se_tpg,
					 const char *page,
					 size_t count)
{
	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
				struct tcm_vhost_tpg, se_tpg);
	struct tcm_vhost_tport *tport_wwn = tpg->tport;
	unsigned char i_port[TCM_VHOST_NAMELEN], *ptr, *port_ptr;
	int ret;
	/*
	 * Shutdown the active I_T nexus if 'NULL' is passed..
	 */
	if (!strncmp(page, "NULL", 4)) {
		ret = tcm_vhost_drop_nexus(tpg);
		return (!ret) ? count : ret;
	}
	/*
	 * Otherwise make sure the passed virtual Initiator port WWN matches
	 * the fabric protocol_id set in tcm_vhost_make_tport(), and call
	 * tcm_vhost_make_nexus().
	 */
	if (strlen(page) >= TCM_VHOST_NAMELEN) {
		pr_err("Emulated NAA SAS Address: %s, exceeds"
				" max: %d\n", page, TCM_VHOST_NAMELEN);
		return -EINVAL;
	}
	snprintf(&i_port[0], TCM_VHOST_NAMELEN, "%s", page);

	ptr = strstr(i_port, "naa.");
	if (ptr) {
		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) {
			pr_err("Passed SAS Initiator Port %s does not"
				" match target port protoid: %s\n", i_port,
				tcm_vhost_dump_proto_id(tport_wwn));
			return -EINVAL;
		}
		port_ptr = &i_port[0];
		goto check_newline;
	}
	ptr = strstr(i_port, "fc.");
	if (ptr) {
		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) {
			pr_err("Passed FCP Initiator Port %s does not"
				" match target port protoid: %s\n", i_port,
				tcm_vhost_dump_proto_id(tport_wwn));
			return -EINVAL;
		}
		port_ptr = &i_port[3]; /* Skip over "fc." */
		goto check_newline;
	}
	ptr = strstr(i_port, "iqn.");
	if (ptr) {
		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) {
			pr_err("Passed iSCSI Initiator Port %s does not"
				" match target port protoid: %s\n", i_port,
				tcm_vhost_dump_proto_id(tport_wwn));
			return -EINVAL;
		}
		port_ptr = &i_port[0];
		goto check_newline;
	}
	pr_err("Unable to locate prefix for emulated Initiator Port:"
			" %s\n", i_port);
	return -EINVAL;
	/*
	 * Clear any trailing newline for the NAA WWN
	 */
check_newline:
	if (i_port[strlen(i_port) - 1] == '\n')
		i_port[strlen(i_port) - 1] = '\0';

	ret = tcm_vhost_make_nexus(tpg, port_ptr);
	if (ret < 0)
		return ret;

	return count;
}

TF_TPG_BASE_ATTR(tcm_vhost, nexus, S_IRUGO | S_IWUSR);

static struct configfs_attribute *tcm_vhost_tpg_attrs[] = {
	&tcm_vhost_tpg_nexus.attr,
	NULL,
};

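/*
 * configfs callback for "mkdir tpgt_N" under a WWN directory: parse the
 * TPG tag out of the group name and register a new TPG with the target
 * core.
 */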
static struct se_portal_group *
tcm_vhost_make_tpg(struct se_wwn *wwn,
		   struct config_group *group,
		   const char *name)
{
	struct tcm_vhost_tport *tport = container_of(wwn,
			struct tcm_vhost_tport, tport_wwn);
	struct tcm_vhost_tpg *tpg;
	unsigned long tpgt;
	int ret;

	if (strstr(name, "tpgt_") != name)
		return ERR_PTR(-EINVAL);
	if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)
		return ERR_PTR(-EINVAL);

	tpg = kzalloc(sizeof(struct tcm_vhost_tpg), GFP_KERNEL);
	if (!tpg) {
		pr_err("Unable to allocate struct tcm_vhost_tpg");
		return ERR_PTR(-ENOMEM);
	}
	mutex_init(&tpg->tv_tpg_mutex);
	INIT_LIST_HEAD(&tpg->tv_tpg_list);
	tpg->tport = tport;
	tpg->tport_tpgt = tpgt;

	ret = core_tpg_register(&tcm_vhost_fabric_configfs->tf_ops, wwn,
				&tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
	if (ret < 0) {
		kfree(tpg);
		return NULL;
	}
	mutex_lock(&tcm_vhost_mutex);
	list_add_tail(&tpg->tv_tpg_list, &tcm_vhost_list);
	mutex_unlock(&tcm_vhost_mutex);

	return &tpg->se_tpg;
}

static void tcm_vhost_drop_tpg(struct se_portal_group *se_tpg)
{
	struct tcm_vhost_tpg *tpg = container_of(se_tpg,
				struct tcm_vhost_tpg, se_tpg);

	mutex_lock(&tcm_vhost_mutex);
	list_del(&tpg->tv_tpg_list);
	mutex_unlock(&tcm_vhost_mutex);
	/*
	 * Release the virtual I_T Nexus for this vhost TPG
	 */
	tcm_vhost_drop_nexus(tpg);
	/*
	 * Deregister the se_tpg from TCM..
	 */
	core_tpg_deregister(se_tpg);
	kfree(tpg);
}

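/*
 * configfs callback for creating a WWN directory: derive the emulated
 * SCSI protocol (SAS, FCP or iSCSI) from the directory name prefix and
 * allocate the corresponding tport.
 */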
static struct se_wwn *
tcm_vhost_make_tport(struct target_fabric_configfs *tf,
		     struct config_group *group,
		     const char *name)
{
	struct tcm_vhost_tport *tport;
	char *ptr;
	u64 wwpn = 0;
	int off = 0;

	/* if (tcm_vhost_parse_wwn(name, &wwpn, 1) < 0)
		return ERR_PTR(-EINVAL); */

	tport = kzalloc(sizeof(struct tcm_vhost_tport), GFP_KERNEL);
	if (!tport) {
		pr_err("Unable to allocate struct tcm_vhost_tport");
		return ERR_PTR(-ENOMEM);
	}
	tport->tport_wwpn = wwpn;
	/*
	 * Determine the emulated Protocol Identifier and Target Port Name
	 * based on the incoming configfs directory name.
	 */
	ptr = strstr(name, "naa.");
	if (ptr) {
		tport->tport_proto_id = SCSI_PROTOCOL_SAS;
		goto check_len;
	}
	ptr = strstr(name, "fc.");
	if (ptr) {
		tport->tport_proto_id = SCSI_PROTOCOL_FCP;
		off = 3; /* Skip over "fc." */
		goto check_len;
	}
	ptr = strstr(name, "iqn.");
	if (ptr) {
		tport->tport_proto_id = SCSI_PROTOCOL_ISCSI;
		goto check_len;
	}

	pr_err("Unable to locate prefix for emulated Target Port:"
			" %s\n", name);
	kfree(tport);
	return ERR_PTR(-EINVAL);

check_len:
	if (strlen(name) >= TCM_VHOST_NAMELEN) {
		pr_err("Emulated %s Address: %s, exceeds"
			" max: %d\n", tcm_vhost_dump_proto_id(tport), name,
			TCM_VHOST_NAMELEN);
		kfree(tport);
		return ERR_PTR(-EINVAL);
	}
	snprintf(&tport->tport_name[0], TCM_VHOST_NAMELEN, "%s", &name[off]);

	pr_debug("TCM_VHost_ConfigFS: Allocated emulated Target"
		" %s Address: %s\n", tcm_vhost_dump_proto_id(tport), name);

	return &tport->tport_wwn;
}

static void tcm_vhost_drop_tport(struct se_wwn *wwn)
{
	struct tcm_vhost_tport *tport = container_of(wwn,
				struct tcm_vhost_tport, tport_wwn);

	pr_debug("TCM_VHost_ConfigFS: Deallocating emulated Target"
		" %s Address: %s\n", tcm_vhost_dump_proto_id(tport),
		tport->tport_name);

	kfree(tport);
}

static ssize_t
tcm_vhost_wwn_show_attr_version(struct target_fabric_configfs *tf,
				char *page)
{
	return sprintf(page, "TCM_VHOST fabric module %s on %s/%s"
		" on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname,
		utsname()->machine);
}

TF_WWN_ATTR_RO(tcm_vhost, version);

static struct configfs_attribute *tcm_vhost_wwn_attrs[] = {
	&tcm_vhost_wwn_version.attr,
	NULL,
};

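/*
 * Fabric dependent API callbacks wired into the generic target core.
 * Demo mode is always enabled and write protection is always disabled
 * for this fabric.
 */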
static struct target_core_fabric_ops tcm_vhost_ops = {
	.get_fabric_name = tcm_vhost_get_fabric_name,
	.get_fabric_proto_ident = tcm_vhost_get_fabric_proto_ident,
	.tpg_get_wwn = tcm_vhost_get_fabric_wwn,
	.tpg_get_tag = tcm_vhost_get_tag,
	.tpg_get_default_depth = tcm_vhost_get_default_depth,
	.tpg_get_pr_transport_id = tcm_vhost_get_pr_transport_id,
	.tpg_get_pr_transport_id_len = tcm_vhost_get_pr_transport_id_len,
	.tpg_parse_pr_out_transport_id = tcm_vhost_parse_pr_out_transport_id,
	.tpg_check_demo_mode = tcm_vhost_check_true,
	.tpg_check_demo_mode_cache = tcm_vhost_check_true,
	.tpg_check_demo_mode_write_protect = tcm_vhost_check_false,
	.tpg_check_prod_mode_write_protect = tcm_vhost_check_false,
	.tpg_alloc_fabric_acl = tcm_vhost_alloc_fabric_acl,
	.tpg_release_fabric_acl = tcm_vhost_release_fabric_acl,
	.tpg_get_inst_index = tcm_vhost_tpg_get_inst_index,
	.release_cmd = tcm_vhost_release_cmd,
	.check_stop_free = vhost_scsi_check_stop_free,
	.shutdown_session = tcm_vhost_shutdown_session,
	.close_session = tcm_vhost_close_session,
	.sess_get_index = tcm_vhost_sess_get_index,
	.sess_get_initiator_sid = NULL,
	.write_pending = tcm_vhost_write_pending,
	.write_pending_status = tcm_vhost_write_pending_status,
	.set_default_node_attributes = tcm_vhost_set_default_node_attrs,
	.get_task_tag = tcm_vhost_get_task_tag,
	.get_cmd_state = tcm_vhost_get_cmd_state,
	.queue_data_in = tcm_vhost_queue_data_in,
	.queue_status = tcm_vhost_queue_status,
	.queue_tm_rsp = tcm_vhost_queue_tm_rsp,
	/*
	 * Setup callers for generic logic in target_core_fabric_configfs.c
	 */
	.fabric_make_wwn = tcm_vhost_make_tport,
	.fabric_drop_wwn = tcm_vhost_drop_tport,
	.fabric_make_tpg = tcm_vhost_make_tpg,
	.fabric_drop_tpg = tcm_vhost_drop_tpg,
	.fabric_post_link = tcm_vhost_port_link,
	.fabric_pre_unlink = tcm_vhost_port_unlink,
	.fabric_make_np = NULL,
	.fabric_drop_np = NULL,
	.fabric_make_nodeacl = tcm_vhost_make_nodeacl,
	.fabric_drop_nodeacl = tcm_vhost_drop_nodeacl,
};

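/*
 * Register the "vhost" fabric with the target core configfs subsystem,
 * exposing /sys/kernel/config/target/vhost/ for userspace
 * configuration.
 */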
static int tcm_vhost_register_configfs(void)
{
	struct target_fabric_configfs *fabric;
	int ret;

	pr_debug("TCM_VHOST fabric module %s on %s/%s"
		" on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname,
		utsname()->machine);
	/*
	 * Register the top level struct config_item_type with TCM core
	 */
	fabric = target_fabric_configfs_init(THIS_MODULE, "vhost");
	if (IS_ERR(fabric)) {
		pr_err("target_fabric_configfs_init() failed\n");
		return PTR_ERR(fabric);
	}
	/*
	 * Setup fabric->tf_ops from our local tcm_vhost_ops
	 */
	fabric->tf_ops = tcm_vhost_ops;
	/*
	 * Setup default attribute lists for various fabric->tf_cit_tmpl
	 */
	TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = tcm_vhost_wwn_attrs;
	TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = tcm_vhost_tpg_attrs;
	TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;
	TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
	TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;
	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;
	/*
	 * Register the fabric for use within TCM
	 */
	ret = target_fabric_configfs_register(fabric);
	if (ret < 0) {
		pr_err("target_fabric_configfs_register() failed"
				" for TCM_VHOST\n");
		return ret;
	}
	/*
	 * Setup our local pointer to *fabric
	 */
	tcm_vhost_fabric_configfs = fabric;
	pr_debug("TCM_VHOST[0] - Set fabric -> tcm_vhost_fabric_configfs\n");
	return 0;
}

static void tcm_vhost_deregister_configfs(void)
{
	if (!tcm_vhost_fabric_configfs)
		return;

	target_fabric_configfs_deregister(tcm_vhost_fabric_configfs);
	tcm_vhost_fabric_configfs = NULL;
	pr_debug("TCM_VHOST[0] - Cleared tcm_vhost_fabric_configfs\n");
}

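/*
 * Module init: bring up the private workqueue, the /dev/vhost-scsi
 * character device and the configfs fabric, unwinding in reverse order
 * on failure.
 */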
static int __init tcm_vhost_init(void)
{
	int ret = -ENOMEM;
	/*
	 * Use our own dedicated workqueue for submitting I/O into
	 * target core to avoid contention within system_wq.
	 */
	tcm_vhost_workqueue = alloc_workqueue("tcm_vhost", 0, 0);
	if (!tcm_vhost_workqueue)
		goto out;

	ret = vhost_scsi_register();
	if (ret < 0)
		goto out_destroy_workqueue;

	ret = tcm_vhost_register_configfs();
	if (ret < 0)
		goto out_vhost_scsi_deregister;

	return 0;

out_vhost_scsi_deregister:
	vhost_scsi_deregister();
out_destroy_workqueue:
	destroy_workqueue(tcm_vhost_workqueue);
out:
	return ret;
}

static void tcm_vhost_exit(void)
{
	tcm_vhost_deregister_configfs();
	vhost_scsi_deregister();
	destroy_workqueue(tcm_vhost_workqueue);
}

MODULE_DESCRIPTION("VHOST_SCSI series fabric driver");
MODULE_ALIAS("tcm_vhost");
MODULE_LICENSE("GPL");
module_init(tcm_vhost_init);
module_exit(tcm_vhost_exit);