2012-07-18 14:31:32 -07:00
/*******************************************************************************
* Vhost kernel TCM fabric driver for virtio SCSI initiators
*
2013-09-05 15:29:12 -07:00
* ( C ) Copyright 2010 - 2013 Datera , Inc .
2012-07-18 14:31:32 -07:00
* ( C ) Copyright 2010 - 2012 IBM Corp .
*
* Licensed to the Linux Foundation under the General Public License ( GPL ) version 2.
*
2013-09-05 15:29:12 -07:00
* Authors : Nicholas A . Bellinger < nab @ daterainc . com >
2012-07-18 14:31:32 -07:00
* Stefan Hajnoczi < stefanha @ linux . vnet . ibm . com >
*
* This program is free software ; you can redistribute it and / or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation ; either version 2 of the License , or
* ( at your option ) any later version .
*
* This program is distributed in the hope that it will be useful ,
* but WITHOUT ANY WARRANTY ; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the
* GNU General Public License for more details .
*
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
# include <linux/module.h>
# include <linux/moduleparam.h>
# include <generated/utsrelease.h>
# include <linux/utsname.h>
# include <linux/init.h>
# include <linux/slab.h>
# include <linux/kthread.h>
# include <linux/types.h>
# include <linux/string.h>
# include <linux/configfs.h>
# include <linux/ctype.h>
# include <linux/compat.h>
# include <linux/eventfd.h>
# include <linux/fs.h>
# include <linux/miscdevice.h>
# include <asm/unaligned.h>
# include <scsi/scsi.h>
# include <target/target_core_base.h>
# include <target/target_core_fabric.h>
# include <target/target_core_fabric_configfs.h>
# include <target/target_core_configfs.h>
# include <target/configfs_macros.h>
# include <linux/vhost.h>
# include <linux/virtio_scsi.h>
2013-01-06 14:36:13 +08:00
# include <linux/llist.h>
tcm_vhost: Multi-queue support
This adds virtio-scsi multi-queue support to tcm_vhost. In order to use
multi-queue, guest side multi-queue support is need. It can
be found here:
https://lkml.org/lkml/2012/12/18/166
Currently, only one thread is created by vhost core code for each
vhost_scsi instance. Even if there are multi-queues, all the handling of
guest kick (vhost_scsi_handle_kick) are processed in one thread. This is
not optimal. Luckily, most of the work is offloaded to the tcm_vhost
workqueue.
Some initial perf numbers:
1 queue, 4 targets, 1 lun per target
4K request size, 50% randread + 50% randwrite: 127K/127k IOPS
4 queues, 4 targets, 1 lun per target
4K request size, 50% randread + 50% randwrite: 181K/181k IOPS
Signed-off-by: Asias He <asias@redhat.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
2013-02-06 13:20:59 +08:00
# include <linux/bitmap.h>
2013-06-07 17:47:46 -07:00
# include <linux/percpu_ida.h>
2012-07-18 14:31:32 -07:00
# include "vhost.h"
2013-05-02 03:50:34 +03:00
2015-01-31 23:56:53 -08:00
#define VHOST_SCSI_VERSION  "v0.1"
#define VHOST_SCSI_NAMELEN 256
#define VHOST_SCSI_MAX_CDB_SIZE 32
/* Per-session tag pool depth and preallocated SGL/page/PI resources */
#define VHOST_SCSI_DEFAULT_TAGS 256
#define VHOST_SCSI_PREALLOC_SGLS 2048
#define VHOST_SCSI_PREALLOC_UPAGES 2048
#define VHOST_SCSI_PREALLOC_PROT_SGLS 512
2013-05-02 03:50:34 +03:00
struct vhost_scsi_inflight {
/* Wait for the flush operation to finish */
struct completion comp ;
/* Refcount for the inflight reqs */
struct kref kref ;
} ;
2015-01-31 23:56:53 -08:00
struct vhost_scsi_cmd {
2013-05-02 03:50:34 +03:00
/* Descriptor from vhost_get_vq_desc() for virt_queue segment */
int tvc_vq_desc ;
/* virtio-scsi initiator task attribute */
int tvc_task_attr ;
2015-01-27 13:13:12 -08:00
/* virtio-scsi response incoming iovecs */
int tvc_in_iovs ;
2013-05-02 03:50:34 +03:00
/* virtio-scsi initiator data direction */
enum dma_data_direction tvc_data_direction ;
/* Expected data transfer length from virtio-scsi header */
u32 tvc_exp_data_len ;
/* The Tag from include/linux/virtio_scsi.h:struct virtio_scsi_cmd_req */
u64 tvc_tag ;
/* The number of scatterlists associated with this cmd */
u32 tvc_sgl_count ;
2014-02-22 18:34:43 -08:00
u32 tvc_prot_sgl_count ;
2015-01-31 23:56:53 -08:00
/* Saved unpacked SCSI LUN for vhost_scsi_submission_work() */
2013-05-02 03:50:34 +03:00
u32 tvc_lun ;
/* Pointer to the SGL formatted memory from virtio-scsi */
struct scatterlist * tvc_sgl ;
2014-02-22 18:08:24 -08:00
struct scatterlist * tvc_prot_sgl ;
2013-06-21 14:32:04 -07:00
struct page * * tvc_upages ;
2015-01-27 13:13:12 -08:00
/* Pointer to response header iovec */
struct iovec * tvc_resp_iov ;
2013-05-02 03:50:34 +03:00
/* Pointer to vhost_scsi for our device */
struct vhost_scsi * tvc_vhost ;
/* Pointer to vhost_virtqueue for the cmd */
struct vhost_virtqueue * tvc_vq ;
/* Pointer to vhost nexus memory */
2015-01-31 23:56:53 -08:00
struct vhost_scsi_nexus * tvc_nexus ;
2013-05-02 03:50:34 +03:00
/* The TCM I/O descriptor that is accessed via container_of() */
struct se_cmd tvc_se_cmd ;
2015-01-31 23:56:53 -08:00
/* work item used for cmwq dispatch to vhost_scsi_submission_work() */
2013-05-02 03:50:34 +03:00
struct work_struct work ;
/* Copy of the incoming SCSI command descriptor block (CDB) */
2015-01-31 23:56:53 -08:00
unsigned char tvc_cdb [ VHOST_SCSI_MAX_CDB_SIZE ] ;
2013-05-02 03:50:34 +03:00
/* Sense buffer that will be mapped into outgoing status */
unsigned char tvc_sense_buf [ TRANSPORT_SENSE_BUFFER ] ;
/* Completed commands list, serviced from vhost worker thread */
struct llist_node tvc_completion_list ;
/* Used to track inflight cmd */
struct vhost_scsi_inflight * inflight ;
} ;
struct vhost_scsi_nexus {
	/* Pointer to TCM session for I_T Nexus */
	struct se_session *tvn_se_sess;
};
2015-01-31 23:56:53 -08:00
struct vhost_scsi_tpg {
2013-05-02 03:50:34 +03:00
/* Vhost port target portal group tag for TCM */
u16 tport_tpgt ;
/* Used to track number of TPG Port/Lun Links wrt to explict I_T Nexus shutdown */
int tv_tpg_port_count ;
/* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */
int tv_tpg_vhost_count ;
2015-03-28 00:03:51 -07:00
/* Used for enabling T10-PI with legacy devices */
int tv_fabric_prot_type ;
2015-01-31 23:56:53 -08:00
/* list for vhost_scsi_list */
2013-05-02 03:50:34 +03:00
struct list_head tv_tpg_list ;
/* Used to protect access for tpg_nexus */
struct mutex tv_tpg_mutex ;
/* Pointer to the TCM VHost I_T Nexus for this TPG endpoint */
2015-01-31 23:56:53 -08:00
struct vhost_scsi_nexus * tpg_nexus ;
/* Pointer back to vhost_scsi_tport */
struct vhost_scsi_tport * tport ;
/* Returned by vhost_scsi_make_tpg() */
2013-05-02 03:50:34 +03:00
struct se_portal_group se_tpg ;
/* Pointer back to vhost_scsi, protected by tv_tpg_mutex */
struct vhost_scsi * vhost_scsi ;
} ;
2015-01-31 23:56:53 -08:00
struct vhost_scsi_tport {
2013-05-02 03:50:34 +03:00
/* SCSI protocol the tport is providing */
u8 tport_proto_id ;
/* Binary World Wide unique Port Name for Vhost Target port */
u64 tport_wwpn ;
/* ASCII formatted WWPN for Vhost Target port */
2015-01-31 23:56:53 -08:00
char tport_name [ VHOST_SCSI_NAMELEN ] ;
/* Returned by vhost_scsi_make_tport() */
2013-05-02 03:50:34 +03:00
struct se_wwn tport_wwn ;
} ;
2015-01-31 23:56:53 -08:00
struct vhost_scsi_evt {
2013-05-02 03:50:34 +03:00
/* event to be sent to guest */
struct virtio_scsi_event event ;
/* event list, serviced from vhost worker thread */
struct llist_node list ;
} ;
/* Fixed virtqueue layout: control, event, then the I/O queues. */
enum {
	VHOST_SCSI_VQ_CTL = 0,
	VHOST_SCSI_VQ_EVT = 1,
	VHOST_SCSI_VQ_IO = 2,
};
2014-11-23 18:01:34 +02:00
/* Note: can't set VIRTIO_F_VERSION_1 yet, since that implies ANY_LAYOUT. */
2013-03-27 17:23:41 -07:00
enum {
2014-02-22 18:22:31 -08:00
VHOST_SCSI_FEATURES = VHOST_FEATURES | ( 1ULL < < VIRTIO_SCSI_F_HOTPLUG ) |
2015-01-28 01:31:52 -08:00
( 1ULL < < VIRTIO_SCSI_F_T10_PI ) |
( 1ULL < < VIRTIO_F_ANY_LAYOUT ) |
( 1ULL < < VIRTIO_F_VERSION_1 )
2013-03-27 17:23:41 -07:00
} ;
tcm_vhost: Multi-queue support
This adds virtio-scsi multi-queue support to tcm_vhost. In order to use
multi-queue, guest side multi-queue support is need. It can
be found here:
https://lkml.org/lkml/2012/12/18/166
Currently, only one thread is created by vhost core code for each
vhost_scsi instance. Even if there are multi-queues, all the handling of
guest kick (vhost_scsi_handle_kick) are processed in one thread. This is
not optimal. Luckily, most of the work is offloaded to the tcm_vhost
workqueue.
Some initial perf numbers:
1 queue, 4 targets, 1 lun per target
4K request size, 50% randread + 50% randwrite: 127K/127k IOPS
4 queues, 4 targets, 1 lun per target
4K request size, 50% randread + 50% randwrite: 181K/181k IOPS
Signed-off-by: Asias He <asias@redhat.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
2013-02-06 13:20:59 +08:00
#define VHOST_SCSI_MAX_TARGET	256
#define VHOST_SCSI_MAX_VQ	128
#define VHOST_SCSI_MAX_EVENT	128
2013-02-05 12:31:57 +08:00
2013-04-27 11:16:48 +08:00
struct vhost_scsi_virtqueue {
struct vhost_virtqueue vq ;
2013-04-28 15:38:52 +03:00
/*
* Reference counting for inflight reqs , used for flush operation . At
* each time , one reference tracks new commands submitted , while we
* wait for another one to reach 0.
*/
2013-04-27 11:16:49 +08:00
struct vhost_scsi_inflight inflights [ 2 ] ;
2013-04-28 15:38:52 +03:00
/*
* Indicate current inflight in use , protected by vq - > mutex .
* Writers must also take dev mutex and flush under it .
*/
2013-04-27 11:16:49 +08:00
int inflight_idx ;
2013-04-27 11:16:48 +08:00
} ;
2012-07-18 14:31:32 -07:00
struct vhost_scsi {
2013-02-05 12:31:57 +08:00
/* Protected by vhost_scsi->dev.mutex */
2015-01-31 23:56:53 -08:00
struct vhost_scsi_tpg * * vs_tpg ;
2013-02-05 12:31:57 +08:00
char vs_vhost_wwpn [ TRANSPORT_IQN_LEN ] ;
2012-07-18 14:31:32 -07:00
struct vhost_dev dev ;
2013-04-27 11:16:48 +08:00
struct vhost_scsi_virtqueue vqs [ VHOST_SCSI_MAX_VQ ] ;
2012-07-18 14:31:32 -07:00
struct vhost_work vs_completion_work ; /* cmd completion work item */
2013-01-06 14:36:13 +08:00
struct llist_head vs_completion_list ; /* cmd completion queue */
2013-04-25 15:35:21 +08:00
struct vhost_work vs_event_work ; /* evt injection work item */
struct llist_head vs_event_list ; /* evt injection queue */
bool vs_events_missed ; /* any missed events, protected by vq->mutex */
int vs_events_nr ; /* num of pending events, protected by vq->mutex */
2012-07-18 14:31:32 -07:00
} ;
2015-04-08 20:01:35 +02:00
static struct target_core_fabric_ops vhost_scsi_ops ;
2015-01-31 23:56:53 -08:00
static struct workqueue_struct * vhost_scsi_workqueue ;
2012-07-18 14:31:32 -07:00
2015-01-31 23:56:53 -08:00
/* Global spinlock to protect vhost_scsi TPG list for vhost IOCTL access */
static DEFINE_MUTEX ( vhost_scsi_mutex ) ;
static LIST_HEAD ( vhost_scsi_list ) ;
2012-07-18 14:31:32 -07:00
2015-01-28 13:10:51 -08:00
static int iov_num_pages ( void __user * iov_base , size_t iov_len )
2013-01-22 11:20:25 +08:00
{
2015-01-28 13:10:51 -08:00
return ( PAGE_ALIGN ( ( unsigned long ) iov_base + iov_len ) -
( ( unsigned long ) iov_base & PAGE_MASK ) ) > > PAGE_SHIFT ;
2013-01-22 11:20:25 +08:00
}
2015-01-31 23:56:53 -08:00
static void vhost_scsi_done_inflight ( struct kref * kref )
2013-04-27 11:16:49 +08:00
{
struct vhost_scsi_inflight * inflight ;
inflight = container_of ( kref , struct vhost_scsi_inflight , kref ) ;
complete ( & inflight - > comp ) ;
}
2015-01-31 23:56:53 -08:00
static void vhost_scsi_init_inflight ( struct vhost_scsi * vs ,
2013-04-27 11:16:49 +08:00
struct vhost_scsi_inflight * old_inflight [ ] )
{
struct vhost_scsi_inflight * new_inflight ;
struct vhost_virtqueue * vq ;
int idx , i ;
for ( i = 0 ; i < VHOST_SCSI_MAX_VQ ; i + + ) {
vq = & vs - > vqs [ i ] . vq ;
mutex_lock ( & vq - > mutex ) ;
/* store old infight */
idx = vs - > vqs [ i ] . inflight_idx ;
if ( old_inflight )
old_inflight [ i ] = & vs - > vqs [ i ] . inflights [ idx ] ;
/* setup new infight */
vs - > vqs [ i ] . inflight_idx = idx ^ 1 ;
new_inflight = & vs - > vqs [ i ] . inflights [ idx ^ 1 ] ;
kref_init ( & new_inflight - > kref ) ;
init_completion ( & new_inflight - > comp ) ;
mutex_unlock ( & vq - > mutex ) ;
}
}
static struct vhost_scsi_inflight *
2015-01-31 23:56:53 -08:00
vhost_scsi_get_inflight ( struct vhost_virtqueue * vq )
2013-04-27 11:16:49 +08:00
{
struct vhost_scsi_inflight * inflight ;
struct vhost_scsi_virtqueue * svq ;
svq = container_of ( vq , struct vhost_scsi_virtqueue , vq ) ;
inflight = & svq - > inflights [ svq - > inflight_idx ] ;
kref_get ( & inflight - > kref ) ;
return inflight ;
}
2015-01-31 23:56:53 -08:00
static void vhost_scsi_put_inflight ( struct vhost_scsi_inflight * inflight )
2013-04-27 11:16:49 +08:00
{
2015-01-31 23:56:53 -08:00
kref_put ( & inflight - > kref , vhost_scsi_done_inflight ) ;
2013-04-27 11:16:49 +08:00
}
/* Fabric callback stub: unconditionally "true". */
static int vhost_scsi_check_true(struct se_portal_group *se_tpg)
{
	return 1;
}
/* Fabric callback stub: unconditionally "false". */
static int vhost_scsi_check_false(struct se_portal_group *se_tpg)
{
	return 0;
}
/* Fabric name reported to the target core. */
static char *vhost_scsi_get_fabric_name(void)
{
	return "vhost";
}
2015-01-31 23:56:53 -08:00
static u8 vhost_scsi_get_fabric_proto_ident ( struct se_portal_group * se_tpg )
2012-07-18 14:31:32 -07:00
{
2015-01-31 23:56:53 -08:00
struct vhost_scsi_tpg * tpg = container_of ( se_tpg ,
struct vhost_scsi_tpg , se_tpg ) ;
struct vhost_scsi_tport * tport = tpg - > tport ;
2012-07-18 14:31:32 -07:00
switch ( tport - > tport_proto_id ) {
case SCSI_PROTOCOL_SAS :
return sas_get_fabric_proto_ident ( se_tpg ) ;
case SCSI_PROTOCOL_FCP :
return fc_get_fabric_proto_ident ( se_tpg ) ;
case SCSI_PROTOCOL_ISCSI :
return iscsi_get_fabric_proto_ident ( se_tpg ) ;
default :
pr_err ( " Unknown tport_proto_id: 0x%02x, using "
" SAS emulation \n " , tport - > tport_proto_id ) ;
break ;
}
return sas_get_fabric_proto_ident ( se_tpg ) ;
}
2015-01-31 23:56:53 -08:00
static char * vhost_scsi_get_fabric_wwn ( struct se_portal_group * se_tpg )
2012-07-18 14:31:32 -07:00
{
2015-01-31 23:56:53 -08:00
struct vhost_scsi_tpg * tpg = container_of ( se_tpg ,
struct vhost_scsi_tpg , se_tpg ) ;
struct vhost_scsi_tport * tport = tpg - > tport ;
2012-07-18 14:31:32 -07:00
return & tport - > tport_name [ 0 ] ;
}
2015-01-31 23:56:53 -08:00
static u16 vhost_scsi_get_tpgt ( struct se_portal_group * se_tpg )
2012-07-18 14:31:32 -07:00
{
2015-01-31 23:56:53 -08:00
struct vhost_scsi_tpg * tpg = container_of ( se_tpg ,
struct vhost_scsi_tpg , se_tpg ) ;
2012-07-18 14:31:32 -07:00
return tpg - > tport_tpgt ;
}
2013-05-06 16:38:27 +08:00
static u32
2015-01-31 23:56:53 -08:00
vhost_scsi_get_pr_transport_id ( struct se_portal_group * se_tpg ,
2013-05-06 16:38:27 +08:00
struct se_node_acl * se_nacl ,
struct t10_pr_registration * pr_reg ,
int * format_code ,
unsigned char * buf )
2012-07-18 14:31:32 -07:00
{
2015-01-31 23:56:53 -08:00
struct vhost_scsi_tpg * tpg = container_of ( se_tpg ,
struct vhost_scsi_tpg , se_tpg ) ;
struct vhost_scsi_tport * tport = tpg - > tport ;
2012-07-18 14:31:32 -07:00
switch ( tport - > tport_proto_id ) {
case SCSI_PROTOCOL_SAS :
return sas_get_pr_transport_id ( se_tpg , se_nacl , pr_reg ,
format_code , buf ) ;
case SCSI_PROTOCOL_FCP :
return fc_get_pr_transport_id ( se_tpg , se_nacl , pr_reg ,
format_code , buf ) ;
case SCSI_PROTOCOL_ISCSI :
return iscsi_get_pr_transport_id ( se_tpg , se_nacl , pr_reg ,
format_code , buf ) ;
default :
pr_err ( " Unknown tport_proto_id: 0x%02x, using "
" SAS emulation \n " , tport - > tport_proto_id ) ;
break ;
}
return sas_get_pr_transport_id ( se_tpg , se_nacl , pr_reg ,
format_code , buf ) ;
}
2013-05-06 16:38:27 +08:00
static u32
2015-01-31 23:56:53 -08:00
vhost_scsi_get_pr_transport_id_len ( struct se_portal_group * se_tpg ,
2013-05-06 16:38:27 +08:00
struct se_node_acl * se_nacl ,
struct t10_pr_registration * pr_reg ,
int * format_code )
2012-07-18 14:31:32 -07:00
{
2015-01-31 23:56:53 -08:00
struct vhost_scsi_tpg * tpg = container_of ( se_tpg ,
struct vhost_scsi_tpg , se_tpg ) ;
struct vhost_scsi_tport * tport = tpg - > tport ;
2012-07-18 14:31:32 -07:00
switch ( tport - > tport_proto_id ) {
case SCSI_PROTOCOL_SAS :
return sas_get_pr_transport_id_len ( se_tpg , se_nacl , pr_reg ,
format_code ) ;
case SCSI_PROTOCOL_FCP :
return fc_get_pr_transport_id_len ( se_tpg , se_nacl , pr_reg ,
format_code ) ;
case SCSI_PROTOCOL_ISCSI :
return iscsi_get_pr_transport_id_len ( se_tpg , se_nacl , pr_reg ,
format_code ) ;
default :
pr_err ( " Unknown tport_proto_id: 0x%02x, using "
" SAS emulation \n " , tport - > tport_proto_id ) ;
break ;
}
return sas_get_pr_transport_id_len ( se_tpg , se_nacl , pr_reg ,
format_code ) ;
}
2013-05-06 16:38:27 +08:00
static char *
2015-01-31 23:56:53 -08:00
vhost_scsi_parse_pr_out_transport_id ( struct se_portal_group * se_tpg ,
2013-05-06 16:38:27 +08:00
const char * buf ,
u32 * out_tid_len ,
char * * port_nexus_ptr )
2012-07-18 14:31:32 -07:00
{
2015-01-31 23:56:53 -08:00
struct vhost_scsi_tpg * tpg = container_of ( se_tpg ,
struct vhost_scsi_tpg , se_tpg ) ;
struct vhost_scsi_tport * tport = tpg - > tport ;
2012-07-18 14:31:32 -07:00
switch ( tport - > tport_proto_id ) {
case SCSI_PROTOCOL_SAS :
return sas_parse_pr_out_transport_id ( se_tpg , buf , out_tid_len ,
port_nexus_ptr ) ;
case SCSI_PROTOCOL_FCP :
return fc_parse_pr_out_transport_id ( se_tpg , buf , out_tid_len ,
port_nexus_ptr ) ;
case SCSI_PROTOCOL_ISCSI :
return iscsi_parse_pr_out_transport_id ( se_tpg , buf , out_tid_len ,
port_nexus_ptr ) ;
default :
pr_err ( " Unknown tport_proto_id: 0x%02x, using "
" SAS emulation \n " , tport - > tport_proto_id ) ;
break ;
}
return sas_parse_pr_out_transport_id ( se_tpg , buf , out_tid_len ,
port_nexus_ptr ) ;
}
2015-03-28 00:03:51 -07:00
static int vhost_scsi_check_prot_fabric_only ( struct se_portal_group * se_tpg )
{
struct vhost_scsi_tpg * tpg = container_of ( se_tpg ,
struct vhost_scsi_tpg , se_tpg ) ;
return tpg - > tv_fabric_prot_type ;
}
2015-01-31 23:56:53 -08:00
static u32 vhost_scsi_tpg_get_inst_index ( struct se_portal_group * se_tpg )
2012-07-18 14:31:32 -07:00
{
return 1 ;
}
2015-01-31 23:56:53 -08:00
static void vhost_scsi_release_cmd ( struct se_cmd * se_cmd )
2012-07-18 14:31:32 -07:00
{
2015-01-31 23:56:53 -08:00
struct vhost_scsi_cmd * tv_cmd = container_of ( se_cmd ,
struct vhost_scsi_cmd , tvc_se_cmd ) ;
2015-01-29 17:21:13 -08:00
struct se_session * se_sess = tv_cmd - > tvc_nexus - > tvn_se_sess ;
2014-02-22 18:34:43 -08:00
int i ;
2013-06-06 02:20:41 -07:00
if ( tv_cmd - > tvc_sgl_count ) {
for ( i = 0 ; i < tv_cmd - > tvc_sgl_count ; i + + )
put_page ( sg_page ( & tv_cmd - > tvc_sgl [ i ] ) ) ;
2013-09-17 22:54:31 +03:00
}
2014-02-22 18:34:43 -08:00
if ( tv_cmd - > tvc_prot_sgl_count ) {
for ( i = 0 ; i < tv_cmd - > tvc_prot_sgl_count ; i + + )
put_page ( sg_page ( & tv_cmd - > tvc_prot_sgl [ i ] ) ) ;
}
2013-06-06 02:20:41 -07:00
2015-01-31 23:56:53 -08:00
vhost_scsi_put_inflight ( tv_cmd - > inflight ) ;
2013-06-07 17:47:46 -07:00
percpu_ida_free ( & se_sess - > sess_tag_pool , se_cmd - > map_tag ) ;
2012-07-18 14:31:32 -07:00
}
/* Fabric callback stub: nothing to do on session shutdown. */
static int vhost_scsi_shutdown_session(struct se_session *se_sess)
{
	return 0;
}
/* Fabric callback stub: nothing to do on session close. */
static void vhost_scsi_close_session(struct se_session *se_sess)
{
}
2015-01-31 23:56:53 -08:00
static u32 vhost_scsi_sess_get_index ( struct se_session * se_sess )
2012-07-18 14:31:32 -07:00
{
return 0 ;
}
/*
 * Data for WRITEs is already mapped from the guest, so execute the
 * command immediately instead of waiting for a data-out phase.
 */
static int vhost_scsi_write_pending(struct se_cmd *se_cmd)
{
	/* Go ahead and process the write immediately */
	target_execute_cmd(se_cmd);
	return 0;
}
/* Fabric callback stub: write-pending status is never deferred. */
static int vhost_scsi_write_pending_status(struct se_cmd *se_cmd)
{
	return 0;
}
/* Fabric callback stub: no default node attributes to apply. */
static void vhost_scsi_set_default_node_attrs(struct se_node_acl *nacl)
{
}
2015-01-31 23:56:53 -08:00
static u32 vhost_scsi_get_task_tag ( struct se_cmd * se_cmd )
2012-07-18 14:31:32 -07:00
{
return 0 ;
}
/* Fabric callback stub: no per-command state exposed. */
static int vhost_scsi_get_cmd_state(struct se_cmd *se_cmd)
{
	return 0;
}
2015-01-31 23:56:53 -08:00
static void vhost_scsi_complete_cmd ( struct vhost_scsi_cmd * cmd )
2012-07-30 13:30:00 -07:00
{
2013-05-06 16:38:29 +08:00
struct vhost_scsi * vs = cmd - > tvc_vhost ;
2012-07-30 13:30:00 -07:00
2013-05-06 16:38:29 +08:00
llist_add ( & cmd - > tvc_completion_list , & vs - > vs_completion_list ) ;
2012-07-30 13:30:00 -07:00
vhost_work_queue ( & vs - > dev , & vs - > vs_completion_work ) ;
}
2012-07-18 14:31:32 -07:00
2015-01-31 23:56:53 -08:00
static int vhost_scsi_queue_data_in ( struct se_cmd * se_cmd )
2012-07-18 14:31:32 -07:00
{
2015-01-31 23:56:53 -08:00
struct vhost_scsi_cmd * cmd = container_of ( se_cmd ,
struct vhost_scsi_cmd , tvc_se_cmd ) ;
2013-05-06 16:38:29 +08:00
vhost_scsi_complete_cmd ( cmd ) ;
2012-07-18 14:31:32 -07:00
return 0 ;
}
2015-01-31 23:56:53 -08:00
static int vhost_scsi_queue_status ( struct se_cmd * se_cmd )
2012-07-18 14:31:32 -07:00
{
2015-01-31 23:56:53 -08:00
struct vhost_scsi_cmd * cmd = container_of ( se_cmd ,
struct vhost_scsi_cmd , tvc_se_cmd ) ;
2013-05-06 16:38:29 +08:00
vhost_scsi_complete_cmd ( cmd ) ;
2012-07-18 14:31:32 -07:00
return 0 ;
}
/* Fabric callback stub: TMF responses are not delivered to the guest. */
static void vhost_scsi_queue_tm_rsp(struct se_cmd *se_cmd)
{
}
/* Fabric callback stub: nothing extra to do for aborted tasks. */
static void vhost_scsi_aborted_task(struct se_cmd *se_cmd)
{
}
2015-01-31 23:56:53 -08:00
static void vhost_scsi_free_evt ( struct vhost_scsi * vs , struct vhost_scsi_evt * evt )
2013-04-25 15:35:21 +08:00
{
vs - > vs_events_nr - - ;
kfree ( evt ) ;
}
2015-01-31 23:56:53 -08:00
static struct vhost_scsi_evt *
vhost_scsi_allocate_evt ( struct vhost_scsi * vs ,
2013-05-06 16:38:27 +08:00
u32 event , u32 reason )
2013-04-25 15:35:21 +08:00
{
2013-04-27 11:16:48 +08:00
struct vhost_virtqueue * vq = & vs - > vqs [ VHOST_SCSI_VQ_EVT ] . vq ;
2015-01-31 23:56:53 -08:00
struct vhost_scsi_evt * evt ;
2013-04-25 15:35:21 +08:00
if ( vs - > vs_events_nr > VHOST_SCSI_MAX_EVENT ) {
vs - > vs_events_missed = true ;
return NULL ;
}
evt = kzalloc ( sizeof ( * evt ) , GFP_KERNEL ) ;
if ( ! evt ) {
2015-01-31 23:56:53 -08:00
vq_err ( vq , " Failed to allocate vhost_scsi_evt \n " ) ;
2013-04-25 15:35:21 +08:00
vs - > vs_events_missed = true ;
return NULL ;
}
2014-11-23 18:01:34 +02:00
evt - > event . event = cpu_to_vhost32 ( vq , event ) ;
evt - > event . reason = cpu_to_vhost32 ( vq , reason ) ;
2013-04-25 15:35:21 +08:00
vs - > vs_events_nr + + ;
return evt ;
}
2015-01-31 23:56:53 -08:00
static void vhost_scsi_free_cmd ( struct vhost_scsi_cmd * cmd )
2012-07-18 14:31:32 -07:00
{
2013-05-06 16:38:29 +08:00
struct se_cmd * se_cmd = & cmd - > tvc_se_cmd ;
2012-07-18 14:31:32 -07:00
/* TODO locking against target/backend threads? */
2013-06-06 01:44:48 -07:00
transport_generic_free_cmd ( se_cmd , 0 ) ;
2012-07-18 14:31:32 -07:00
2013-06-06 02:20:41 -07:00
}
/* Drop the session-cmd reference taken at submission time. */
static int vhost_scsi_check_stop_free(struct se_cmd *se_cmd)
{
	return target_put_sess_cmd(se_cmd);
}
2013-05-06 16:38:27 +08:00
static void
2015-01-31 23:56:53 -08:00
vhost_scsi_do_evt_work ( struct vhost_scsi * vs , struct vhost_scsi_evt * evt )
2013-04-25 15:35:21 +08:00
{
2013-04-27 11:16:48 +08:00
struct vhost_virtqueue * vq = & vs - > vqs [ VHOST_SCSI_VQ_EVT ] . vq ;
2013-04-25 15:35:21 +08:00
struct virtio_scsi_event * event = & evt - > event ;
struct virtio_scsi_event __user * eventp ;
unsigned out , in ;
int head , ret ;
if ( ! vq - > private_data ) {
vs - > vs_events_missed = true ;
return ;
}
again :
vhost_disable_notify ( & vs - > dev , vq ) ;
2014-06-05 15:20:27 +03:00
head = vhost_get_vq_desc ( vq , vq - > iov ,
2013-04-25 15:35:21 +08:00
ARRAY_SIZE ( vq - > iov ) , & out , & in ,
NULL , NULL ) ;
if ( head < 0 ) {
vs - > vs_events_missed = true ;
return ;
}
if ( head = = vq - > num ) {
if ( vhost_enable_notify ( & vs - > dev , vq ) )
goto again ;
vs - > vs_events_missed = true ;
return ;
}
if ( ( vq - > iov [ out ] . iov_len ! = sizeof ( struct virtio_scsi_event ) ) ) {
vq_err ( vq , " Expecting virtio_scsi_event, got %zu bytes \n " ,
vq - > iov [ out ] . iov_len ) ;
vs - > vs_events_missed = true ;
return ;
}
if ( vs - > vs_events_missed ) {
2014-11-23 18:01:34 +02:00
event - > event | = cpu_to_vhost32 ( vq , VIRTIO_SCSI_T_EVENTS_MISSED ) ;
2013-04-25 15:35:21 +08:00
vs - > vs_events_missed = false ;
}
eventp = vq - > iov [ out ] . iov_base ;
ret = __copy_to_user ( eventp , event , sizeof ( * event ) ) ;
if ( ! ret )
vhost_add_used_and_signal ( & vs - > dev , vq , head , 0 ) ;
else
2015-01-31 23:56:53 -08:00
vq_err ( vq , " Faulted on vhost_scsi_send_event \n " ) ;
2013-04-25 15:35:21 +08:00
}
2015-01-31 23:56:53 -08:00
static void vhost_scsi_evt_work ( struct vhost_work * work )
2013-04-25 15:35:21 +08:00
{
struct vhost_scsi * vs = container_of ( work , struct vhost_scsi ,
vs_event_work ) ;
2013-04-27 11:16:48 +08:00
struct vhost_virtqueue * vq = & vs - > vqs [ VHOST_SCSI_VQ_EVT ] . vq ;
2015-01-31 23:56:53 -08:00
struct vhost_scsi_evt * evt ;
2013-04-25 15:35:21 +08:00
struct llist_node * llnode ;
mutex_lock ( & vq - > mutex ) ;
llnode = llist_del_all ( & vs - > vs_event_list ) ;
while ( llnode ) {
2015-01-31 23:56:53 -08:00
evt = llist_entry ( llnode , struct vhost_scsi_evt , list ) ;
2013-04-25 15:35:21 +08:00
llnode = llist_next ( llnode ) ;
2015-01-31 23:56:53 -08:00
vhost_scsi_do_evt_work ( vs , evt ) ;
vhost_scsi_free_evt ( vs , evt ) ;
2013-04-25 15:35:21 +08:00
}
mutex_unlock ( & vq - > mutex ) ;
}
2012-07-18 14:31:32 -07:00
/* Fill in status and signal that we are done processing this command
*
* This is scheduled in the vhost work queue so we are called with the owner
* process mm and can access the vring .
*/
static void vhost_scsi_complete_cmd_work ( struct vhost_work * work )
{
struct vhost_scsi * vs = container_of ( work , struct vhost_scsi ,
vs_completion_work ) ;
tcm_vhost: Multi-queue support
This adds virtio-scsi multi-queue support to tcm_vhost. In order to use
multi-queue, guest side multi-queue support is need. It can
be found here:
https://lkml.org/lkml/2012/12/18/166
Currently, only one thread is created by vhost core code for each
vhost_scsi instance. Even if there are multi-queues, all the handling of
guest kick (vhost_scsi_handle_kick) are processed in one thread. This is
not optimal. Luckily, most of the work is offloaded to the tcm_vhost
workqueue.
Some initial perf numbers:
1 queue, 4 targets, 1 lun per target
4K request size, 50% randread + 50% randwrite: 127K/127k IOPS
4 queues, 4 targets, 1 lun per target
4K request size, 50% randread + 50% randwrite: 181K/181k IOPS
Signed-off-by: Asias He <asias@redhat.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
2013-02-06 13:20:59 +08:00
DECLARE_BITMAP ( signal , VHOST_SCSI_MAX_VQ ) ;
2013-01-06 14:36:13 +08:00
struct virtio_scsi_cmd_resp v_rsp ;
2015-01-31 23:56:53 -08:00
struct vhost_scsi_cmd * cmd ;
2013-01-06 14:36:13 +08:00
struct llist_node * llnode ;
struct se_cmd * se_cmd ;
2015-01-27 13:13:12 -08:00
struct iov_iter iov_iter ;
tcm_vhost: Multi-queue support
This adds virtio-scsi multi-queue support to tcm_vhost. In order to use
multi-queue, guest side multi-queue support is need. It can
be found here:
https://lkml.org/lkml/2012/12/18/166
Currently, only one thread is created by vhost core code for each
vhost_scsi instance. Even if there are multi-queues, all the handling of
guest kick (vhost_scsi_handle_kick) are processed in one thread. This is
not optimal. Luckily, most of the work is offloaded to the tcm_vhost
workqueue.
Some initial perf numbers:
1 queue, 4 targets, 1 lun per target
4K request size, 50% randread + 50% randwrite: 127K/127k IOPS
4 queues, 4 targets, 1 lun per target
4K request size, 50% randread + 50% randwrite: 181K/181k IOPS
Signed-off-by: Asias He <asias@redhat.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
2013-02-06 13:20:59 +08:00
int ret , vq ;
2012-07-18 14:31:32 -07:00
tcm_vhost: Multi-queue support
This adds virtio-scsi multi-queue support to tcm_vhost. In order to use
multi-queue, guest side multi-queue support is need. It can
be found here:
https://lkml.org/lkml/2012/12/18/166
Currently, only one thread is created by vhost core code for each
vhost_scsi instance. Even if there are multi-queues, all the handling of
guest kick (vhost_scsi_handle_kick) are processed in one thread. This is
not optimal. Luckily, most of the work is offloaded to the tcm_vhost
workqueue.
Some initial perf numbers:
1 queue, 4 targets, 1 lun per target
4K request size, 50% randread + 50% randwrite: 127K/127k IOPS
4 queues, 4 targets, 1 lun per target
4K request size, 50% randread + 50% randwrite: 181K/181k IOPS
Signed-off-by: Asias He <asias@redhat.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
2013-02-06 13:20:59 +08:00
bitmap_zero ( signal , VHOST_SCSI_MAX_VQ ) ;
2013-01-06 14:36:13 +08:00
llnode = llist_del_all ( & vs - > vs_completion_list ) ;
while ( llnode ) {
2015-01-31 23:56:53 -08:00
cmd = llist_entry ( llnode , struct vhost_scsi_cmd ,
2013-01-06 14:36:13 +08:00
tvc_completion_list ) ;
llnode = llist_next ( llnode ) ;
2013-05-06 16:38:29 +08:00
se_cmd = & cmd - > tvc_se_cmd ;
2012-07-18 14:31:32 -07:00
pr_debug ( " %s tv_cmd %p resid %u status %#02x \n " , __func__ ,
2013-05-06 16:38:29 +08:00
cmd , se_cmd - > residual_count , se_cmd - > scsi_status ) ;
2012-07-18 14:31:32 -07:00
memset ( & v_rsp , 0 , sizeof ( v_rsp ) ) ;
2014-11-23 18:01:34 +02:00
v_rsp . resid = cpu_to_vhost32 ( cmd - > tvc_vq , se_cmd - > residual_count ) ;
2012-07-18 14:31:32 -07:00
/* TODO is status_qualifier field needed? */
v_rsp . status = se_cmd - > scsi_status ;
2014-11-23 18:01:34 +02:00
v_rsp . sense_len = cpu_to_vhost32 ( cmd - > tvc_vq ,
se_cmd - > scsi_sense_length ) ;
2013-05-06 16:38:29 +08:00
memcpy ( v_rsp . sense , cmd - > tvc_sense_buf ,
2014-11-23 18:01:34 +02:00
se_cmd - > scsi_sense_length ) ;
2015-01-27 13:13:12 -08:00
iov_iter_init ( & iov_iter , READ , cmd - > tvc_resp_iov ,
cmd - > tvc_in_iovs , sizeof ( v_rsp ) ) ;
ret = copy_to_iter ( & v_rsp , sizeof ( v_rsp ) , & iov_iter ) ;
if ( likely ( ret = = sizeof ( v_rsp ) ) ) {
2013-04-27 11:16:48 +08:00
struct vhost_scsi_virtqueue * q ;
2013-05-06 16:38:29 +08:00
vhost_add_used ( cmd - > tvc_vq , cmd - > tvc_vq_desc , 0 ) ;
q = container_of ( cmd - > tvc_vq , struct vhost_scsi_virtqueue , vq ) ;
2013-04-27 11:16:48 +08:00
vq = q - vs - > vqs ;
tcm_vhost: Multi-queue support
This adds virtio-scsi multi-queue support to tcm_vhost. In order to use
multi-queue, guest side multi-queue support is need. It can
be found here:
https://lkml.org/lkml/2012/12/18/166
Currently, only one thread is created by vhost core code for each
vhost_scsi instance. Even if there are multi-queues, all the handling of
guest kick (vhost_scsi_handle_kick) are processed in one thread. This is
not optimal. Luckily, most of the work is offloaded to the tcm_vhost
workqueue.
Some initial perf numbers:
1 queue, 4 targets, 1 lun per target
4K request size, 50% randread + 50% randwrite: 127K/127k IOPS
4 queues, 4 targets, 1 lun per target
4K request size, 50% randread + 50% randwrite: 181K/181k IOPS
Signed-off-by: Asias He <asias@redhat.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
2013-02-06 13:20:59 +08:00
__set_bit ( vq , signal ) ;
} else
2012-07-18 14:31:32 -07:00
pr_err ( " Faulted on virtio_scsi_cmd_resp \n " ) ;
2013-05-06 16:38:29 +08:00
vhost_scsi_free_cmd ( cmd ) ;
2012-07-18 14:31:32 -07:00
}
tcm_vhost: Multi-queue support
This adds virtio-scsi multi-queue support to tcm_vhost. In order to use
multi-queue, guest side multi-queue support is need. It can
be found here:
https://lkml.org/lkml/2012/12/18/166
Currently, only one thread is created by vhost core code for each
vhost_scsi instance. Even if there are multi-queues, all the handling of
guest kick (vhost_scsi_handle_kick) are processed in one thread. This is
not optimal. Luckily, most of the work is offloaded to the tcm_vhost
workqueue.
Some initial perf numbers:
1 queue, 4 targets, 1 lun per target
4K request size, 50% randread + 50% randwrite: 127K/127k IOPS
4 queues, 4 targets, 1 lun per target
4K request size, 50% randread + 50% randwrite: 181K/181k IOPS
Signed-off-by: Asias He <asias@redhat.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
2013-02-06 13:20:59 +08:00
vq = - 1 ;
while ( ( vq = find_next_bit ( signal , VHOST_SCSI_MAX_VQ , vq + 1 ) )
< VHOST_SCSI_MAX_VQ )
2013-04-27 11:16:48 +08:00
vhost_signal ( & vs - > dev , & vs - > vqs [ vq ] . vq ) ;
2012-07-18 14:31:32 -07:00
}
2015-01-31 23:56:53 -08:00
/*
 * Allocate and initialize a vhost_scsi_cmd descriptor for a new
 * virtio-scsi request arriving on @vq.
 *
 * The descriptor is not heap-allocated: a tag is taken from the nexus
 * session's percpu_ida pool and used to index the session's
 * pre-allocated sess_cmd_map array.  Returns ERR_PTR(-EIO) if the tpg
 * has no active nexus, ERR_PTR(-ENOMEM) if no tag is available.
 *
 * NOTE(review): percpu_ida_alloc() is called with TASK_RUNNING, so it
 * does not sleep waiting for a free tag — presumably exhaustion is
 * handled by failing the request back to the guest; confirm at callers.
 */
static struct vhost_scsi_cmd *
vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
                   unsigned char *cdb, u64 scsi_tag, u16 lun, u8 task_attr,
                   u32 exp_data_len, int data_direction)
{
        struct vhost_scsi_cmd *cmd;
        struct vhost_scsi_nexus *tv_nexus;
        struct se_session *se_sess;
        struct scatterlist *sg, *prot_sg;
        struct page **pages;
        int tag;

        tv_nexus = tpg->tpg_nexus;
        if (!tv_nexus) {
                pr_err("Unable to locate active struct vhost_scsi_nexus\n");
                return ERR_PTR(-EIO);
        }
        se_sess = tv_nexus->tvn_se_sess;

        tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
        if (tag < 0) {
                pr_err("Unable to obtain tag for vhost_scsi_cmd\n");
                return ERR_PTR(-ENOMEM);
        }

        cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[tag];
        /*
         * Save the pre-allocated scatterlist/page-array pointers across the
         * memset() below; they belong to this slot and must be preserved.
         */
        sg = cmd->tvc_sgl;
        prot_sg = cmd->tvc_prot_sgl;
        pages = cmd->tvc_upages;
        memset(cmd, 0, sizeof(struct vhost_scsi_cmd));

        cmd->tvc_sgl = sg;
        cmd->tvc_prot_sgl = prot_sg;
        cmd->tvc_upages = pages;
        cmd->tvc_se_cmd.map_tag = tag;
        cmd->tvc_tag = scsi_tag;
        cmd->tvc_lun = lun;
        cmd->tvc_task_attr = task_attr;
        cmd->tvc_exp_data_len = exp_data_len;
        cmd->tvc_data_direction = data_direction;
        cmd->tvc_nexus = tv_nexus;
        /* Track this command against the current inflight generation. */
        cmd->inflight = vhost_scsi_get_inflight(vq);

        memcpy(cmd->tvc_cdb, cdb, VHOST_SCSI_MAX_CDB_SIZE);

        return cmd;
}
/*
 * Map a user memory range into a scatterlist
 *
 * Pins the user pages backing [ptr, ptr + len) with
 * get_user_pages_fast() and fills one scatterlist entry per page,
 * starting at @sgl.  The caller owns cmd->tvc_upages, which is used as
 * scratch space for the pinned page pointers; the caller must also
 * guarantee @sgl has room for one entry per page.
 *
 * Returns the number of scatterlist entries used or -errno on error.
 * On error no pages remain pinned.
 */
static int
vhost_scsi_map_to_sgl(struct vhost_scsi_cmd *cmd,
                      void __user *ptr,
                      size_t len,
                      struct scatterlist *sgl,
                      bool write)
{
        unsigned int npages = 0, offset, nbytes;
        unsigned int pages_nr = iov_num_pages(ptr, len);
        struct scatterlist *sg = sgl;
        struct page **pages = cmd->tvc_upages;
        int ret, i;

        /* The scratch page array is a fixed size; refuse oversized ranges. */
        if (pages_nr > VHOST_SCSI_PREALLOC_UPAGES) {
                pr_err("vhost_scsi_map_to_sgl() pages_nr: %u greater than"
                       " preallocated VHOST_SCSI_PREALLOC_UPAGES: %u\n",
                        pages_nr, VHOST_SCSI_PREALLOC_UPAGES);
                return -ENOBUFS;
        }

        ret = get_user_pages_fast((unsigned long)ptr, pages_nr, write, pages);
        /* get_user_pages_fast() failed outright; no pages were pinned. */
        if (ret < 0)
                goto out;
        /* Fewer pages pinned than wanted: unpin them all and fail. */
        if (ret != pages_nr) {
                for (i = 0; i < ret; i++)
                        put_page(pages[i]);
                ret = -EFAULT;
                goto out;
        }

        /* One sg entry per pinned page; first/last may be partial. */
        while (len > 0) {
                offset = (uintptr_t)ptr & ~PAGE_MASK;
                nbytes = min_t(unsigned int, PAGE_SIZE - offset, len);
                sg_set_page(sg, pages[npages], nbytes, offset);
                ptr += nbytes;
                len -= nbytes;
                sg++;
                npages++;
        }

out:
        return ret;
}
2013-05-06 16:38:27 +08:00
static int
2015-01-28 00:15:00 -08:00
vhost_scsi_calc_sgls ( struct iov_iter * iter , size_t bytes , int max_sgls )
2012-07-18 14:31:32 -07:00
{
2015-01-28 00:15:00 -08:00
int sgl_count = 0 ;
2012-07-18 14:31:32 -07:00
2015-01-28 00:15:00 -08:00
if ( ! iter | | ! iter - > iov ) {
pr_err ( " %s: iter->iov is NULL, but expected bytes: %zu "
" present \n " , __func__ , bytes ) ;
return - EINVAL ;
}
2013-01-22 11:20:26 +08:00
2015-01-28 00:15:00 -08:00
sgl_count = iov_iter_npages ( iter , 0xffff ) ;
if ( sgl_count > max_sgls ) {
pr_err ( " %s: requested sgl_count: %d exceeds pre-allocated "
" max_sgls: %d \n " , __func__ , sgl_count , max_sgls ) ;
return - EINVAL ;
2014-02-22 18:34:08 -08:00
}
2015-01-28 00:15:00 -08:00
return sgl_count ;
}
2012-07-18 14:31:32 -07:00
2015-01-28 00:15:00 -08:00
static int
2015-01-31 23:56:53 -08:00
vhost_scsi_iov_to_sgl ( struct vhost_scsi_cmd * cmd , bool write ,
struct iov_iter * iter ,
struct scatterlist * sg , int sg_count )
2015-01-28 00:15:00 -08:00
{
size_t off = iter - > iov_offset ;
int i , ret ;
2012-07-18 14:31:32 -07:00
2015-01-28 00:15:00 -08:00
for ( i = 0 ; i < iter - > nr_segs ; i + + ) {
void __user * base = iter - > iov [ i ] . iov_base + off ;
size_t len = iter - > iov [ i ] . iov_len - off ;
2014-02-22 18:34:08 -08:00
2015-01-28 00:15:00 -08:00
ret = vhost_scsi_map_to_sgl ( cmd , base , len , sg , write ) ;
2012-07-18 14:31:32 -07:00
if ( ret < 0 ) {
2015-01-28 00:15:00 -08:00
for ( i = 0 ; i < sg_count ; i + + ) {
struct page * page = sg_page ( & sg [ i ] ) ;
if ( page )
put_page ( page ) ;
}
2012-07-18 14:31:32 -07:00
return ret ;
}
sg + = ret ;
2015-01-28 00:15:00 -08:00
off = 0 ;
2012-07-18 14:31:32 -07:00
}
return 0 ;
}
2014-02-22 18:34:43 -08:00
static int
2015-01-31 23:56:53 -08:00
vhost_scsi_mapal ( struct vhost_scsi_cmd * cmd ,
2015-01-28 00:15:00 -08:00
size_t prot_bytes , struct iov_iter * prot_iter ,
size_t data_bytes , struct iov_iter * data_iter )
{
int sgl_count , ret ;
bool write = ( cmd - > tvc_data_direction = = DMA_FROM_DEVICE ) ;
if ( prot_bytes ) {
sgl_count = vhost_scsi_calc_sgls ( prot_iter , prot_bytes ,
2015-01-31 23:56:53 -08:00
VHOST_SCSI_PREALLOC_PROT_SGLS ) ;
2015-01-28 00:15:00 -08:00
if ( sgl_count < 0 )
return sgl_count ;
sg_init_table ( cmd - > tvc_prot_sgl , sgl_count ) ;
cmd - > tvc_prot_sgl_count = sgl_count ;
pr_debug ( " %s prot_sg %p prot_sgl_count %u \n " , __func__ ,
cmd - > tvc_prot_sgl , cmd - > tvc_prot_sgl_count ) ;
ret = vhost_scsi_iov_to_sgl ( cmd , write , prot_iter ,
cmd - > tvc_prot_sgl ,
cmd - > tvc_prot_sgl_count ) ;
2014-02-22 18:34:43 -08:00
if ( ret < 0 ) {
cmd - > tvc_prot_sgl_count = 0 ;
return ret ;
}
2015-01-28 00:15:00 -08:00
}
sgl_count = vhost_scsi_calc_sgls ( data_iter , data_bytes ,
2015-01-31 23:56:53 -08:00
VHOST_SCSI_PREALLOC_SGLS ) ;
2015-01-28 00:15:00 -08:00
if ( sgl_count < 0 )
return sgl_count ;
sg_init_table ( cmd - > tvc_sgl , sgl_count ) ;
cmd - > tvc_sgl_count = sgl_count ;
pr_debug ( " %s data_sg %p data_sgl_count %u \n " , __func__ ,
cmd - > tvc_sgl , cmd - > tvc_sgl_count ) ;
ret = vhost_scsi_iov_to_sgl ( cmd , write , data_iter ,
cmd - > tvc_sgl , cmd - > tvc_sgl_count ) ;
if ( ret < 0 ) {
cmd - > tvc_sgl_count = 0 ;
return ret ;
2014-02-22 18:34:43 -08:00
}
return 0 ;
}
2014-12-21 10:42:08 -08:00
/*
 * Translate a virtio-scsi task attribute into the matching TCM task
 * attribute.  VIRTIO_SCSI_S_SIMPLE and any unrecognized value both map
 * to TCM_SIMPLE_TAG.
 */
static int vhost_scsi_to_tcm_attr(int attr)
{
        if (attr == VIRTIO_SCSI_S_ORDERED)
                return TCM_ORDERED_TAG;
        if (attr == VIRTIO_SCSI_S_HEAD)
                return TCM_HEAD_TAG;
        if (attr == VIRTIO_SCSI_S_ACA)
                return TCM_ACA_TAG;
        /* VIRTIO_SCSI_S_SIMPLE and unknown attributes fall through here. */
        return TCM_SIMPLE_TAG;
}
2015-01-31 23:56:53 -08:00
/*
 * Workqueue callback that hands a fully-parsed vhost_scsi_cmd off to the
 * TCM core in process context.  On submission failure, a CHECK CONDITION
 * with LOGICAL UNIT COMMUNICATION FAILURE sense is queued back to the
 * guest and the command is freed.
 */
static void vhost_scsi_submission_work(struct work_struct *work)
{
        struct vhost_scsi_cmd *cmd =
                container_of(work, struct vhost_scsi_cmd, work);
        struct vhost_scsi_nexus *tv_nexus;
        struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
        struct scatterlist *sg_ptr, *sg_prot_ptr = NULL;
        int rc;

        /* FIXME: BIDI operation */
        if (cmd->tvc_sgl_count) {
                sg_ptr = cmd->tvc_sgl;

                if (cmd->tvc_prot_sgl_count)
                        sg_prot_ptr = cmd->tvc_prot_sgl;
                else
                        /* No PI payload from the guest: tell TCM to skip
                         * protection transfer for this command. */
                        se_cmd->prot_pto = true;
        } else {
                sg_ptr = NULL;
        }
        tv_nexus = cmd->tvc_nexus;

        rc = target_submit_cmd_map_sgls(se_cmd, tv_nexus->tvn_se_sess,
                        cmd->tvc_cdb, &cmd->tvc_sense_buf[0],
                        cmd->tvc_lun, cmd->tvc_exp_data_len,
                        vhost_scsi_to_tcm_attr(cmd->tvc_task_attr),
                        cmd->tvc_data_direction, TARGET_SCF_ACK_KREF,
                        sg_ptr, cmd->tvc_sgl_count, NULL, 0, sg_prot_ptr,
                        cmd->tvc_prot_sgl_count);
        if (rc < 0) {
                transport_send_check_condition_and_sense(se_cmd,
                                TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
                transport_generic_free_cmd(se_cmd, 0);
        }
}
2013-05-06 16:38:27 +08:00
static void
vhost_scsi_send_bad_target ( struct vhost_scsi * vs ,
struct vhost_virtqueue * vq ,
int head , unsigned out )
2013-04-10 15:06:15 +08:00
{
struct virtio_scsi_cmd_resp __user * resp ;
struct virtio_scsi_cmd_resp rsp ;
int ret ;
memset ( & rsp , 0 , sizeof ( rsp ) ) ;
rsp . response = VIRTIO_SCSI_S_BAD_TARGET ;
resp = vq - > iov [ out ] . iov_base ;
ret = __copy_to_user ( resp , & rsp , sizeof ( rsp ) ) ;
if ( ! ret )
vhost_add_used_and_signal ( & vs - > dev , vq , head , 0 ) ;
else
pr_err ( " Faulted on virtio_scsi_cmd_resp \n " ) ;
}
2013-05-06 16:38:27 +08:00
/*
 * Main request-virtqueue service loop.  Drains every available
 * descriptor from @vq, parses the virtio-scsi request header (plain or
 * T10-PI variant, ANY_LAYOUT aware), allocates a command descriptor,
 * maps the data/protection payloads, and queues the command for
 * submission to TCM via the vhost_scsi_workqueue.
 *
 * Per-request failures are reported to the guest with
 * vhost_scsi_send_bad_target() and the loop continues; only
 * vhost_get_vq_desc() errors or an empty ring stop the loop.
 * Runs with vq->mutex held for the duration.
 */
static void
vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
{
        struct vhost_scsi_tpg **vs_tpg, *tpg;
        struct virtio_scsi_cmd_req v_req;
        struct virtio_scsi_cmd_req_pi v_req_pi;
        struct vhost_scsi_cmd *cmd;
        struct iov_iter out_iter, in_iter, prot_iter, data_iter;
        u64 tag;
        u32 exp_data_len, data_direction;
        unsigned out, in;
        int head, ret, prot_bytes;
        size_t req_size, rsp_size = sizeof(struct virtio_scsi_cmd_resp);
        size_t out_size, in_size;
        u16 lun;
        u8 *target, *lunp, task_attr;
        bool t10_pi = vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI);
        void *req, *cdb;

        mutex_lock(&vq->mutex);
        /*
         * We can handle the vq only after the endpoint is setup by calling the
         * VHOST_SCSI_SET_ENDPOINT ioctl.
         */
        vs_tpg = vq->private_data;
        if (!vs_tpg)
                goto out;

        vhost_disable_notify(&vs->dev, vq);

        for (;;) {
                head = vhost_get_vq_desc(vq, vq->iov,
                                         ARRAY_SIZE(vq->iov), &out, &in,
                                         NULL, NULL);
                pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
                         head, out, in);
                /* On error, stop handling until the next kick. */
                if (unlikely(head < 0))
                        break;
                /* Nothing new?  Wait for eventfd to tell us they refilled. */
                if (head == vq->num) {
                        if (unlikely(vhost_enable_notify(&vs->dev, vq))) {
                                vhost_disable_notify(&vs->dev, vq);
                                continue;
                        }
                        break;
                }
                /*
                 * Check for a sane response buffer so we can report early
                 * errors back to the guest.
                 */
                if (unlikely(vq->iov[out].iov_len < rsp_size)) {
                        vq_err(vq, "Expecting at least virtio_scsi_cmd_resp"
                                " size, got %zu bytes\n", vq->iov[out].iov_len);
                        break;
                }
                /*
                 * Setup pointers and values based upon different virtio-scsi
                 * request header if T10_PI is enabled in KVM guest.
                 */
                if (t10_pi) {
                        req = &v_req_pi;
                        req_size = sizeof(v_req_pi);
                        lunp = &v_req_pi.lun[0];
                        target = &v_req_pi.lun[1];
                } else {
                        req = &v_req;
                        req_size = sizeof(v_req);
                        lunp = &v_req.lun[0];
                        target = &v_req.lun[1];
                }
                /*
                 * FIXME: Not correct for BIDI operation
                 */
                out_size = iov_length(vq->iov, out);
                in_size = iov_length(&vq->iov[out], in);

                /*
                 * Copy over the virtio-scsi request header, which for a
                 * ANY_LAYOUT enabled guest may span multiple iovecs, or a
                 * single iovec may contain both the header + outgoing
                 * WRITE payloads.
                 *
                 * copy_from_iter() will advance out_iter, so that it will
                 * point at the start of the outgoing WRITE payload, if
                 * DMA_TO_DEVICE is set.
                 */
                iov_iter_init(&out_iter, WRITE, vq->iov, out, out_size);

                ret = copy_from_iter(req, req_size, &out_iter);
                if (unlikely(ret != req_size)) {
                        vq_err(vq, "Faulted on copy_from_iter\n");
                        vhost_scsi_send_bad_target(vs, vq, head, out);
                        continue;
                }
                /* virtio-scsi spec requires byte 0 of the lun to be 1 */
                if (unlikely(*lunp != 1)) {
                        vq_err(vq, "Illegal virtio-scsi lun: %u\n", *lunp);
                        vhost_scsi_send_bad_target(vs, vq, head, out);
                        continue;
                }

                tpg = ACCESS_ONCE(vs_tpg[*target]);
                if (unlikely(!tpg)) {
                        /* Target does not exist, fail the request */
                        vhost_scsi_send_bad_target(vs, vq, head, out);
                        continue;
                }
                /*
                 * Determine data_direction by calculating the total outgoing
                 * iovec sizes + incoming iovec sizes vs. virtio-scsi request +
                 * response headers respectively.
                 *
                 * For DMA_TO_DEVICE this is out_iter, which is already pointing
                 * to the right place.
                 *
                 * For DMA_FROM_DEVICE, the iovec will be just past the end
                 * of the virtio-scsi response header in either the same
                 * or immediately following iovec.
                 *
                 * Any associated T10_PI bytes for the outgoing / incoming
                 * payloads are included in calculation of exp_data_len here.
                 */
                prot_bytes = 0;

                if (out_size > req_size) {
                        data_direction = DMA_TO_DEVICE;
                        exp_data_len = out_size - req_size;
                        data_iter = out_iter;
                } else if (in_size > rsp_size) {
                        data_direction = DMA_FROM_DEVICE;
                        exp_data_len = in_size - rsp_size;

                        iov_iter_init(&in_iter, READ, &vq->iov[out], in,
                                      rsp_size + exp_data_len);
                        iov_iter_advance(&in_iter, rsp_size);
                        data_iter = in_iter;
                } else {
                        data_direction = DMA_NONE;
                        exp_data_len = 0;
                }
                /*
                 * If T10_PI header + payload is present, setup prot_iter values
                 * and recalculate data_iter for vhost_scsi_mapal() mapping to
                 * host scatterlists via get_user_pages_fast().
                 */
                if (t10_pi) {
                        if (v_req_pi.pi_bytesout) {
                                if (data_direction != DMA_TO_DEVICE) {
                                        vq_err(vq, "Received non zero pi_bytesout,"
                                                " but wrong data_direction\n");
                                        vhost_scsi_send_bad_target(vs, vq, head, out);
                                        continue;
                                }
                                prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesout);
                        } else if (v_req_pi.pi_bytesin) {
                                if (data_direction != DMA_FROM_DEVICE) {
                                        vq_err(vq, "Received non zero pi_bytesin,"
                                                " but wrong data_direction\n");
                                        vhost_scsi_send_bad_target(vs, vq, head, out);
                                        continue;
                                }
                                prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesin);
                        }
                        /*
                         * Set prot_iter to data_iter, and advance past any
                         * preceeding prot_bytes that may be present.
                         *
                         * Also fix up the exp_data_len to reflect only the
                         * actual data payload length.
                         */
                        if (prot_bytes) {
                                exp_data_len -= prot_bytes;
                                prot_iter = data_iter;
                                iov_iter_advance(&data_iter, prot_bytes);
                        }
                        tag = vhost64_to_cpu(vq, v_req_pi.tag);
                        task_attr = v_req_pi.task_attr;
                        cdb = &v_req_pi.cdb[0];
                        lun = ((v_req_pi.lun[2] << 8) | v_req_pi.lun[3]) & 0x3FFF;
                } else {
                        tag = vhost64_to_cpu(vq, v_req.tag);
                        task_attr = v_req.task_attr;
                        cdb = &v_req.cdb[0];
                        lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF;
                }
                /*
                 * Check that the received CDB size does not exceeded our
                 * hardcoded max for vhost-scsi, then get a pre-allocated
                 * cmd descriptor for the new virtio-scsi tag.
                 *
                 * TODO what if cdb was too small for varlen cdb header?
                 */
                if (unlikely(scsi_command_size(cdb) > VHOST_SCSI_MAX_CDB_SIZE)) {
                        vq_err(vq, "Received SCSI CDB with command_size: %d that"
                                " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
                                scsi_command_size(cdb), VHOST_SCSI_MAX_CDB_SIZE);
                        vhost_scsi_send_bad_target(vs, vq, head, out);
                        continue;
                }
                cmd = vhost_scsi_get_tag(vq, tpg, cdb, tag, lun, task_attr,
                                         exp_data_len + prot_bytes,
                                         data_direction);
                if (IS_ERR(cmd)) {
                        vq_err(vq, "vhost_scsi_get_tag failed %ld\n",
                               PTR_ERR(cmd));
                        vhost_scsi_send_bad_target(vs, vq, head, out);
                        continue;
                }
                cmd->tvc_vhost = vs;
                cmd->tvc_vq = vq;
                /* Response iovecs, used by the completion path later. */
                cmd->tvc_resp_iov = &vq->iov[out];
                cmd->tvc_in_iovs = in;

                pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
                         cmd->tvc_cdb[0], cmd->tvc_lun);
                pr_debug("cmd: %p exp_data_len: %d, prot_bytes: %d data_direction:"
                         " %d\n", cmd, exp_data_len, prot_bytes, data_direction);

                if (data_direction != DMA_NONE) {
                        ret = vhost_scsi_mapal(cmd,
                                               prot_bytes, &prot_iter,
                                               exp_data_len, &data_iter);
                        if (unlikely(ret)) {
                                vq_err(vq, "Failed to map iov to sgl\n");
                                vhost_scsi_release_cmd(&cmd->tvc_se_cmd);
                                vhost_scsi_send_bad_target(vs, vq, head, out);
                                continue;
                        }
                }
                /*
                 * Save the descriptor from vhost_get_vq_desc() to be used to
                 * complete the virtio-scsi request in TCM callback context via
                 * vhost_scsi_queue_data_in() and vhost_scsi_queue_status()
                 */
                cmd->tvc_vq_desc = head;
                /*
                 * Dispatch cmd descriptor for cmwq execution in process
                 * context provided by vhost_scsi_workqueue.  This also ensures
                 * cmd is executed on the same kworker CPU as this vhost
                 * thread to gain positive L2 cache locality effects.
                 */
                INIT_WORK(&cmd->work, vhost_scsi_submission_work);
                queue_work(vhost_scsi_workqueue, &cmd->work);
        }
out:
        mutex_unlock(&vq->mutex);
}
/*
 * Kick handler for the virtio-scsi control virtqueue.
 *
 * Control-queue requests (task management, asynchronous notification
 * config) are not processed here yet; the handler only logs that it ran.
 */
static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
{
        pr_debug("%s: The handling func for control queue.\n", __func__);
}
2013-05-06 16:38:27 +08:00
static void
2015-01-31 23:56:53 -08:00
vhost_scsi_send_evt ( struct vhost_scsi * vs ,
struct vhost_scsi_tpg * tpg ,
2013-05-06 16:38:27 +08:00
struct se_lun * lun ,
u32 event ,
u32 reason )
2013-04-25 15:35:21 +08:00
{
2015-01-31 23:56:53 -08:00
struct vhost_scsi_evt * evt ;
2013-04-25 15:35:21 +08:00
2015-01-31 23:56:53 -08:00
evt = vhost_scsi_allocate_evt ( vs , event , reason ) ;
2013-04-25 15:35:21 +08:00
if ( ! evt )
return ;
if ( tpg & & lun ) {
/* TODO: share lun setup code with virtio-scsi.ko */
/*
* Note : evt - > event is zeroed when we allocate it and
* lun [ 4 - 7 ] need to be zero according to virtio - scsi spec .
*/
evt - > event . lun [ 0 ] = 0x01 ;
2015-02-05 10:37:33 +03:00
evt - > event . lun [ 1 ] = tpg - > tport_tpgt ;
2013-04-25 15:35:21 +08:00
if ( lun - > unpacked_lun > = 256 )
evt - > event . lun [ 2 ] = lun - > unpacked_lun > > 8 | 0x40 ;
evt - > event . lun [ 3 ] = lun - > unpacked_lun & 0xFF ;
}
llist_add ( & evt - > list , & vs - > vs_event_list ) ;
vhost_work_queue ( & vs - > dev , & vs - > vs_event_work ) ;
}
2012-07-18 14:31:32 -07:00
static void vhost_scsi_evt_handle_kick ( struct vhost_work * work )
{
2013-04-25 15:35:21 +08:00
struct vhost_virtqueue * vq = container_of ( work , struct vhost_virtqueue ,
poll . work ) ;
struct vhost_scsi * vs = container_of ( vq - > dev , struct vhost_scsi , dev ) ;
mutex_lock ( & vq - > mutex ) ;
if ( ! vq - > private_data )
goto out ;
if ( vs - > vs_events_missed )
2015-01-31 23:56:53 -08:00
vhost_scsi_send_evt ( vs , NULL , NULL , VIRTIO_SCSI_T_NO_EVENT , 0 ) ;
2013-04-25 15:35:21 +08:00
out :
mutex_unlock ( & vq - > mutex ) ;
2012-07-18 14:31:32 -07:00
}
static void vhost_scsi_handle_kick ( struct vhost_work * work )
{
struct vhost_virtqueue * vq = container_of ( work , struct vhost_virtqueue ,
poll . work ) ;
struct vhost_scsi * vs = container_of ( vq - > dev , struct vhost_scsi , dev ) ;
tcm_vhost: Multi-queue support
This adds virtio-scsi multi-queue support to tcm_vhost. In order to use
multi-queue, guest side multi-queue support is need. It can
be found here:
https://lkml.org/lkml/2012/12/18/166
Currently, only one thread is created by vhost core code for each
vhost_scsi instance. Even if there are multi-queues, all the handling of
guest kick (vhost_scsi_handle_kick) are processed in one thread. This is
not optimal. Luckily, most of the work is offloaded to the tcm_vhost
workqueue.
Some initial perf numbers:
1 queue, 4 targets, 1 lun per target
4K request size, 50% randread + 50% randwrite: 127K/127k IOPS
4 queues, 4 targets, 1 lun per target
4K request size, 50% randread + 50% randwrite: 181K/181k IOPS
Signed-off-by: Asias He <asias@redhat.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
2013-02-06 13:20:59 +08:00
vhost_scsi_handle_vq ( vs , vq ) ;
2012-07-18 14:31:32 -07:00
}
2013-04-03 14:17:37 +08:00
static void vhost_scsi_flush_vq ( struct vhost_scsi * vs , int index )
{
2013-04-27 11:16:48 +08:00
vhost_poll_flush ( & vs - > vqs [ index ] . vq . poll ) ;
2013-04-03 14:17:37 +08:00
}
2013-04-28 15:38:52 +03:00
/*
 * Drain all in-flight requests and pending work for @vs.
 *
 * Swaps in a fresh per-vq inflight tracker, drops the initial reference
 * on each old tracker, flushes the poll/work contexts so no new requests
 * can be issued against the old trackers, then blocks until every
 * request issued before the flush has completed.
 *
 * Callers must hold dev mutex.
 */
static void vhost_scsi_flush(struct vhost_scsi *vs)
{
        struct vhost_scsi_inflight *old_inflight[VHOST_SCSI_MAX_VQ];
        int i;

        /* Init new inflight and remember the old inflight */
        vhost_scsi_init_inflight(vs, old_inflight);

        /*
         * The inflight->kref was initialized to 1. We decrement it here to
         * indicate the start of the flush operation so that it will reach 0
         * when all the reqs are finished.
         */
        for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
                kref_put(&old_inflight[i]->kref, vhost_scsi_done_inflight);

        /* Flush both the vhost poll and vhost work */
        for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
                vhost_scsi_flush_vq(vs, i);
        vhost_work_flush(&vs->dev, &vs->vs_completion_work);
        vhost_work_flush(&vs->dev, &vs->vs_event_work);

        /* Wait for all reqs issued before the flush to be finished */
        for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
                wait_for_completion(&old_inflight[i]->comp);
}
2012-07-18 14:31:32 -07:00
/*
 * Called from vhost_scsi_ioctl() context to walk the list of available
 * vhost_scsi_tpg with an active struct vhost_scsi_nexus
 *
 * Binds every tpg whose tport WWPN matches t->vhost_wwpn to this
 * vhost_scsi instance, publishing the new vs_tpg lookup table to all
 * virtqueues.  A configfs dependency is taken on each claimed tpg so
 * it cannot be removed while in use.
 *
 *  The lock nesting rule is:
 *    vhost_scsi_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex
 */
static int
vhost_scsi_set_endpoint(struct vhost_scsi *vs,
                        struct vhost_scsi_target *t)
{
        struct se_portal_group *se_tpg;
        struct vhost_scsi_tport *tv_tport;
        struct vhost_scsi_tpg *tpg;
        struct vhost_scsi_tpg **vs_tpg;
        struct vhost_virtqueue *vq;
        int index, ret, i, len;
        bool match = false;

        mutex_lock(&vhost_scsi_mutex);
        mutex_lock(&vs->dev.mutex);

        /* Verify that ring has been setup correctly. */
        for (index = 0; index < vs->dev.nvqs; ++index) {
                if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
                        ret = -EFAULT;
                        goto out;
                }
        }

        /* Build the new target lookup table, seeded from the current one. */
        len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET;
        vs_tpg = kzalloc(len, GFP_KERNEL);
        if (!vs_tpg) {
                ret = -ENOMEM;
                goto out;
        }
        if (vs->vs_tpg)
                memcpy(vs_tpg, vs->vs_tpg, len);

        list_for_each_entry(tpg, &vhost_scsi_list, tv_tpg_list) {
                mutex_lock(&tpg->tv_tpg_mutex);
                if (!tpg->tpg_nexus) {
                        mutex_unlock(&tpg->tv_tpg_mutex);
                        continue;
                }
                /* Already claimed by some vhost instance; skip. */
                if (tpg->tv_tpg_vhost_count != 0) {
                        mutex_unlock(&tpg->tv_tpg_mutex);
                        continue;
                }
                tv_tport = tpg->tport;

                if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
                        if (vs->vs_tpg && vs->vs_tpg[tpg->tport_tpgt]) {
                                kfree(vs_tpg);
                                mutex_unlock(&tpg->tv_tpg_mutex);
                                ret = -EEXIST;
                                goto out;
                        }
                        /*
                         * In order to ensure individual vhost-scsi configfs
                         * groups cannot be removed while in use by vhost ioctl,
                         * go ahead and take an explicit se_tpg->tpg_group.cg_item
                         * dependency now.
                         */
                        se_tpg = &tpg->se_tpg;
                        ret = target_depend_item(&se_tpg->tpg_group.cg_item);
                        if (ret) {
                                pr_warn("configfs_depend_item() failed: %d\n", ret);
                                kfree(vs_tpg);
                                mutex_unlock(&tpg->tv_tpg_mutex);
                                goto out;
                        }
                        tpg->tv_tpg_vhost_count++;
                        tpg->vhost_scsi = vs;
                        vs_tpg[tpg->tport_tpgt] = tpg;
                        smp_mb__after_atomic();
                        match = true;
                }
                mutex_unlock(&tpg->tv_tpg_mutex);
        }

        if (match) {
                memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
                       sizeof(vs->vs_vhost_wwpn));
                /* Publish the new table to every virtqueue. */
                for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
                        vq = &vs->vqs[i].vq;
                        mutex_lock(&vq->mutex);
                        vq->private_data = vs_tpg;
                        vhost_init_used(vq);
                        mutex_unlock(&vq->mutex);
                }
                ret = 0;
        } else {
                ret = -EEXIST;
        }

        /*
         * Act as synchronize_rcu to make sure access to
         * old vs->vs_tpg is finished.
         */
        vhost_scsi_flush(vs);
        kfree(vs->vs_tpg);
        vs->vs_tpg = vs_tpg;

out:
        mutex_unlock(&vs->dev.mutex);
        mutex_unlock(&vhost_scsi_mutex);
        return ret;
}
2013-05-06 16:38:27 +08:00
static int
vhost_scsi_clear_endpoint ( struct vhost_scsi * vs ,
struct vhost_scsi_target * t )
2012-07-18 14:31:32 -07:00
{
2014-10-08 06:19:20 +00:00
struct se_portal_group * se_tpg ;
2015-01-31 23:56:53 -08:00
struct vhost_scsi_tport * tv_tport ;
struct vhost_scsi_tpg * tpg ;
2013-04-03 14:17:37 +08:00
struct vhost_virtqueue * vq ;
bool match = false ;
2013-02-05 12:31:57 +08:00
int index , ret , i ;
u8 target ;
2012-07-18 14:31:32 -07:00
2015-01-31 23:56:53 -08:00
mutex_lock ( & vhost_scsi_mutex ) ;
2012-07-18 14:31:32 -07:00
mutex_lock ( & vs - > dev . mutex ) ;
/* Verify that ring has been setup correctly. */
for ( index = 0 ; index < vs - > dev . nvqs ; + + index ) {
2013-04-27 11:16:48 +08:00
if ( ! vhost_vq_access_ok ( & vs - > vqs [ index ] . vq ) ) {
2012-07-30 13:30:00 -07:00
ret = - EFAULT ;
2013-03-15 09:14:05 +08:00
goto err_dev ;
2012-07-18 14:31:32 -07:00
}
}
2013-04-03 14:17:37 +08:00
if ( ! vs - > vs_tpg ) {
2013-04-25 15:35:20 +08:00
ret = 0 ;
goto err_dev ;
2013-04-03 14:17:37 +08:00
}
2013-02-05 12:31:57 +08:00
for ( i = 0 ; i < VHOST_SCSI_MAX_TARGET ; i + + ) {
target = i ;
2013-05-06 16:38:28 +08:00
tpg = vs - > vs_tpg [ target ] ;
if ( ! tpg )
2013-02-05 12:31:57 +08:00
continue ;
2013-05-06 16:38:28 +08:00
mutex_lock ( & tpg - > tv_tpg_mutex ) ;
tv_tport = tpg - > tport ;
2013-02-05 12:31:57 +08:00
if ( ! tv_tport ) {
ret = - ENODEV ;
2013-03-15 09:14:05 +08:00
goto err_tpg ;
2013-02-05 12:31:57 +08:00
}
if ( strcmp ( tv_tport - > tport_name , t - > vhost_wwpn ) ) {
2013-05-06 16:38:28 +08:00
pr_warn ( " tv_tport->tport_name: %s, tpg->tport_tpgt: %hu "
2013-02-05 12:31:57 +08:00
" does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu \n " ,
2013-05-06 16:38:28 +08:00
tv_tport - > tport_name , tpg - > tport_tpgt ,
2013-02-05 12:31:57 +08:00
t - > vhost_wwpn , t - > vhost_tpgt ) ;
ret = - EINVAL ;
2013-03-15 09:14:05 +08:00
goto err_tpg ;
2013-02-05 12:31:57 +08:00
}
2013-05-06 16:38:28 +08:00
tpg - > tv_tpg_vhost_count - - ;
tpg - > vhost_scsi = NULL ;
2013-02-05 12:31:57 +08:00
vs - > vs_tpg [ target ] = NULL ;
2013-04-03 14:17:37 +08:00
match = true ;
2013-05-06 16:38:28 +08:00
mutex_unlock ( & tpg - > tv_tpg_mutex ) ;
2014-10-08 06:19:20 +00:00
/*
* Release se_tpg - > tpg_group . cg_item configfs dependency now
* to allow vhost - scsi WWPN se_tpg - > tpg_group shutdown to occur .
*/
se_tpg = & tpg - > se_tpg ;
2015-05-03 08:50:52 +02:00
target_undepend_item ( & se_tpg - > tpg_group . cg_item ) ;
2012-07-18 14:31:32 -07:00
}
2013-04-03 14:17:37 +08:00
if ( match ) {
for ( i = 0 ; i < VHOST_SCSI_MAX_VQ ; i + + ) {
2013-04-27 11:16:48 +08:00
vq = & vs - > vqs [ i ] . vq ;
2013-04-03 14:17:37 +08:00
mutex_lock ( & vq - > mutex ) ;
2013-05-07 14:54:36 +08:00
vq - > private_data = NULL ;
2013-04-03 14:17:37 +08:00
mutex_unlock ( & vq - > mutex ) ;
}
}
/*
* Act as synchronize_rcu to make sure access to
* old vs - > vs_tpg is finished .
*/
vhost_scsi_flush ( vs ) ;
kfree ( vs - > vs_tpg ) ;
vs - > vs_tpg = NULL ;
2013-04-25 15:35:21 +08:00
WARN_ON ( vs - > vs_events_nr ) ;
2012-07-18 14:31:32 -07:00
mutex_unlock ( & vs - > dev . mutex ) ;
2015-01-31 23:56:53 -08:00
mutex_unlock ( & vhost_scsi_mutex ) ;
2012-07-18 14:31:32 -07:00
return 0 ;
2012-07-30 13:30:00 -07:00
2013-03-15 09:14:05 +08:00
err_tpg :
2013-05-06 16:38:28 +08:00
mutex_unlock ( & tpg - > tv_tpg_mutex ) ;
2013-03-15 09:14:05 +08:00
err_dev :
2012-07-30 13:30:00 -07:00
mutex_unlock ( & vs - > dev . mutex ) ;
2015-01-31 23:56:53 -08:00
mutex_unlock ( & vhost_scsi_mutex ) ;
2012-07-30 13:30:00 -07:00
return ret ;
2012-07-18 14:31:32 -07:00
}
2013-04-03 14:17:37 +08:00
/*
 * VHOST_SET_FEATURES backend: reject unsupported bits, then latch the
 * negotiated feature mask into every virtqueue under its own mutex.
 */
static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
{
	struct vhost_virtqueue *vq;
	int i;

	if (features & ~VHOST_SCSI_FEATURES)
		return -EOPNOTSUPP;

	mutex_lock(&vs->dev.mutex);
	/* Logging requires a valid log base to have been set up. */
	if ((features & (1 << VHOST_F_LOG_ALL)) &&
	    !vhost_log_access_ok(&vs->dev)) {
		mutex_unlock(&vs->dev.mutex);
		return -EFAULT;
	}

	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
		vq = &vs->vqs[i].vq;
		mutex_lock(&vq->mutex);
		vq->acked_features = features;
		mutex_unlock(&vq->mutex);
	}
	mutex_unlock(&vs->dev.mutex);
	return 0;
}
2012-07-18 14:31:32 -07:00
static int vhost_scsi_open ( struct inode * inode , struct file * f )
{
2013-05-06 16:38:26 +08:00
struct vhost_scsi * vs ;
2013-04-27 11:16:48 +08:00
struct vhost_virtqueue * * vqs ;
2013-09-17 09:30:34 +03:00
int r = - ENOMEM , i ;
2012-07-18 14:31:32 -07:00
2013-09-17 09:30:34 +03:00
vs = kzalloc ( sizeof ( * vs ) , GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT ) ;
if ( ! vs ) {
vs = vzalloc ( sizeof ( * vs ) ) ;
if ( ! vs )
goto err_vs ;
}
2012-07-18 14:31:32 -07:00
2013-04-27 11:16:48 +08:00
vqs = kmalloc ( VHOST_SCSI_MAX_VQ * sizeof ( * vqs ) , GFP_KERNEL ) ;
2013-09-17 09:30:34 +03:00
if ( ! vqs )
goto err_vqs ;
2013-04-27 11:16:48 +08:00
2013-05-06 16:38:26 +08:00
vhost_work_init ( & vs - > vs_completion_work , vhost_scsi_complete_cmd_work ) ;
2015-01-31 23:56:53 -08:00
vhost_work_init ( & vs - > vs_event_work , vhost_scsi_evt_work ) ;
2013-04-25 15:35:21 +08:00
2013-05-06 16:38:26 +08:00
vs - > vs_events_nr = 0 ;
vs - > vs_events_missed = false ;
2012-07-18 14:31:32 -07:00
2013-05-06 16:38:26 +08:00
vqs [ VHOST_SCSI_VQ_CTL ] = & vs - > vqs [ VHOST_SCSI_VQ_CTL ] . vq ;
vqs [ VHOST_SCSI_VQ_EVT ] = & vs - > vqs [ VHOST_SCSI_VQ_EVT ] . vq ;
vs - > vqs [ VHOST_SCSI_VQ_CTL ] . vq . handle_kick = vhost_scsi_ctl_handle_kick ;
vs - > vqs [ VHOST_SCSI_VQ_EVT ] . vq . handle_kick = vhost_scsi_evt_handle_kick ;
2013-04-27 11:16:48 +08:00
for ( i = VHOST_SCSI_VQ_IO ; i < VHOST_SCSI_MAX_VQ ; i + + ) {
2013-05-06 16:38:26 +08:00
vqs [ i ] = & vs - > vqs [ i ] . vq ;
vs - > vqs [ i ] . vq . handle_kick = vhost_scsi_handle_kick ;
2013-04-27 11:16:48 +08:00
}
2013-12-07 04:13:03 +08:00
vhost_dev_init ( & vs - > dev , vqs , VHOST_SCSI_MAX_VQ ) ;
2013-04-27 11:16:49 +08:00
2015-01-31 23:56:53 -08:00
vhost_scsi_init_inflight ( vs , NULL ) ;
2013-04-27 11:16:49 +08:00
2013-05-06 16:38:26 +08:00
f - > private_data = vs ;
2012-07-18 14:31:32 -07:00
return 0 ;
2013-09-17 09:30:34 +03:00
err_vqs :
2014-06-12 19:00:01 +03:00
kvfree ( vs ) ;
2013-09-17 09:30:34 +03:00
err_vs :
return r ;
2012-07-18 14:31:32 -07:00
}
static int vhost_scsi_release ( struct inode * inode , struct file * f )
{
2013-05-06 16:38:26 +08:00
struct vhost_scsi * vs = f - > private_data ;
2013-02-05 12:31:57 +08:00
struct vhost_scsi_target t ;
2012-07-18 14:31:32 -07:00
2013-05-06 16:38:26 +08:00
mutex_lock ( & vs - > dev . mutex ) ;
memcpy ( t . vhost_wwpn , vs - > vs_vhost_wwpn , sizeof ( t . vhost_wwpn ) ) ;
mutex_unlock ( & vs - > dev . mutex ) ;
vhost_scsi_clear_endpoint ( vs , & t ) ;
vhost_dev_stop ( & vs - > dev ) ;
vhost_dev_cleanup ( & vs - > dev , false ) ;
2013-04-25 15:35:21 +08:00
/* Jobs can re-queue themselves in evt kick handler. Do extra flush. */
2013-05-06 16:38:26 +08:00
vhost_scsi_flush ( vs ) ;
kfree ( vs - > dev . vqs ) ;
2014-06-12 19:00:01 +03:00
kvfree ( vs ) ;
2012-07-18 14:31:32 -07:00
return 0 ;
}
2013-05-06 16:38:27 +08:00
static long
vhost_scsi_ioctl ( struct file * f ,
unsigned int ioctl ,
unsigned long arg )
2012-07-18 14:31:32 -07:00
{
struct vhost_scsi * vs = f - > private_data ;
struct vhost_scsi_target backend ;
void __user * argp = ( void __user * ) arg ;
u64 __user * featurep = argp ;
2013-04-25 15:35:22 +08:00
u32 __user * eventsp = argp ;
u32 events_missed ;
2012-07-18 14:31:32 -07:00
u64 features ;
2012-07-30 13:30:00 -07:00
int r , abi_version = VHOST_SCSI_ABI_VERSION ;
2013-04-27 11:16:48 +08:00
struct vhost_virtqueue * vq = & vs - > vqs [ VHOST_SCSI_VQ_EVT ] . vq ;
2012-07-18 14:31:32 -07:00
switch ( ioctl ) {
case VHOST_SCSI_SET_ENDPOINT :
if ( copy_from_user ( & backend , argp , sizeof backend ) )
return - EFAULT ;
2012-08-18 15:44:09 -07:00
if ( backend . reserved ! = 0 )
return - EOPNOTSUPP ;
2012-07-18 14:31:32 -07:00
return vhost_scsi_set_endpoint ( vs , & backend ) ;
case VHOST_SCSI_CLEAR_ENDPOINT :
if ( copy_from_user ( & backend , argp , sizeof backend ) )
return - EFAULT ;
2012-08-18 15:44:09 -07:00
if ( backend . reserved ! = 0 )
return - EOPNOTSUPP ;
2012-07-18 14:31:32 -07:00
return vhost_scsi_clear_endpoint ( vs , & backend ) ;
case VHOST_SCSI_GET_ABI_VERSION :
2012-07-30 13:30:00 -07:00
if ( copy_to_user ( argp , & abi_version , sizeof abi_version ) )
2012-07-18 14:31:32 -07:00
return - EFAULT ;
return 0 ;
2013-04-25 15:35:22 +08:00
case VHOST_SCSI_SET_EVENTS_MISSED :
if ( get_user ( events_missed , eventsp ) )
return - EFAULT ;
mutex_lock ( & vq - > mutex ) ;
vs - > vs_events_missed = events_missed ;
mutex_unlock ( & vq - > mutex ) ;
return 0 ;
case VHOST_SCSI_GET_EVENTS_MISSED :
mutex_lock ( & vq - > mutex ) ;
events_missed = vs - > vs_events_missed ;
mutex_unlock ( & vq - > mutex ) ;
if ( put_user ( events_missed , eventsp ) )
return - EFAULT ;
return 0 ;
2012-07-18 14:31:32 -07:00
case VHOST_GET_FEATURES :
2013-03-27 17:23:41 -07:00
features = VHOST_SCSI_FEATURES ;
2012-07-18 14:31:32 -07:00
if ( copy_to_user ( featurep , & features , sizeof features ) )
return - EFAULT ;
return 0 ;
case VHOST_SET_FEATURES :
if ( copy_from_user ( & features , featurep , sizeof features ) )
return - EFAULT ;
return vhost_scsi_set_features ( vs , features ) ;
default :
mutex_lock ( & vs - > dev . mutex ) ;
2012-12-06 14:03:34 +02:00
r = vhost_dev_ioctl ( & vs - > dev , ioctl , argp ) ;
/* TODO: flush backend after dev ioctl. */
if ( r = = - ENOIOCTLCMD )
r = vhost_vring_ioctl ( & vs - > dev , ioctl , argp ) ;
2012-07-18 14:31:32 -07:00
mutex_unlock ( & vs - > dev . mutex ) ;
return r ;
}
}
2012-07-30 13:30:00 -07:00
#ifdef CONFIG_COMPAT
/*
 * 32-bit compat entry point: translate the compat pointer argument and
 * reuse the native ioctl handler.
 */
static long vhost_scsi_compat_ioctl(struct file *f, unsigned int ioctl,
				    unsigned long arg)
{
	return vhost_scsi_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
}
#endif
2012-07-18 14:31:32 -07:00
static const struct file_operations vhost_scsi_fops = {
. owner = THIS_MODULE ,
. release = vhost_scsi_release ,
. unlocked_ioctl = vhost_scsi_ioctl ,
2012-07-30 13:30:00 -07:00
# ifdef CONFIG_COMPAT
. compat_ioctl = vhost_scsi_compat_ioctl ,
# endif
2012-07-18 14:31:32 -07:00
. open = vhost_scsi_open ,
. llseek = noop_llseek ,
} ;
static struct miscdevice vhost_scsi_misc = {
MISC_DYNAMIC_MINOR ,
" vhost-scsi " ,
& vhost_scsi_fops ,
} ;
/* Register the vhost-scsi misc character device. */
static int __init vhost_scsi_register(void)
{
	return misc_register(&vhost_scsi_misc);
}
/* Unregister the vhost-scsi misc character device. */
static int vhost_scsi_deregister(void)
{
	return misc_deregister(&vhost_scsi_misc);
}
2015-01-31 23:56:53 -08:00
static char * vhost_scsi_dump_proto_id ( struct vhost_scsi_tport * tport )
2012-07-18 14:31:32 -07:00
{
switch ( tport - > tport_proto_id ) {
case SCSI_PROTOCOL_SAS :
return " SAS " ;
case SCSI_PROTOCOL_FCP :
return " FCP " ;
case SCSI_PROTOCOL_ISCSI :
return " iSCSI " ;
default :
break ;
}
return " Unknown " ;
}
2013-05-06 16:38:27 +08:00
static void
2015-01-31 23:56:53 -08:00
vhost_scsi_do_plug ( struct vhost_scsi_tpg * tpg ,
2013-05-06 16:38:27 +08:00
struct se_lun * lun , bool plug )
2013-04-25 15:35:21 +08:00
{
struct vhost_scsi * vs = tpg - > vhost_scsi ;
struct vhost_virtqueue * vq ;
u32 reason ;
if ( ! vs )
return ;
mutex_lock ( & vs - > dev . mutex ) ;
if ( plug )
reason = VIRTIO_SCSI_EVT_RESET_RESCAN ;
else
reason = VIRTIO_SCSI_EVT_RESET_REMOVED ;
2013-04-27 11:16:48 +08:00
vq = & vs - > vqs [ VHOST_SCSI_VQ_EVT ] . vq ;
2013-04-25 15:35:21 +08:00
mutex_lock ( & vq - > mutex ) ;
2014-06-05 15:20:23 +03:00
if ( vhost_has_feature ( vq , VIRTIO_SCSI_F_HOTPLUG ) )
2015-01-31 23:56:53 -08:00
vhost_scsi_send_evt ( vs , tpg , lun ,
2014-06-05 15:20:23 +03:00
VIRTIO_SCSI_T_TRANSPORT_RESET , reason ) ;
2013-04-25 15:35:21 +08:00
mutex_unlock ( & vq - > mutex ) ;
mutex_unlock ( & vs - > dev . mutex ) ;
}
2015-01-31 23:56:53 -08:00
static void vhost_scsi_hotplug ( struct vhost_scsi_tpg * tpg , struct se_lun * lun )
2013-04-25 15:35:21 +08:00
{
2015-01-31 23:56:53 -08:00
vhost_scsi_do_plug ( tpg , lun , true ) ;
2013-04-25 15:35:21 +08:00
}
2015-01-31 23:56:53 -08:00
static void vhost_scsi_hotunplug ( struct vhost_scsi_tpg * tpg , struct se_lun * lun )
2013-04-25 15:35:21 +08:00
{
2015-01-31 23:56:53 -08:00
vhost_scsi_do_plug ( tpg , lun , false ) ;
2013-04-25 15:35:21 +08:00
}
2015-01-31 23:56:53 -08:00
static int vhost_scsi_port_link ( struct se_portal_group * se_tpg ,
2013-05-06 16:38:27 +08:00
struct se_lun * lun )
2012-07-18 14:31:32 -07:00
{
2015-01-31 23:56:53 -08:00
struct vhost_scsi_tpg * tpg = container_of ( se_tpg ,
struct vhost_scsi_tpg , se_tpg ) ;
2012-07-18 14:31:32 -07:00
2015-01-31 23:56:53 -08:00
mutex_lock ( & vhost_scsi_mutex ) ;
2013-04-25 15:35:21 +08:00
2013-05-06 16:38:28 +08:00
mutex_lock ( & tpg - > tv_tpg_mutex ) ;
tpg - > tv_tpg_port_count + + ;
mutex_unlock ( & tpg - > tv_tpg_mutex ) ;
2012-07-18 14:31:32 -07:00
2015-01-31 23:56:53 -08:00
vhost_scsi_hotplug ( tpg , lun ) ;
2013-04-25 15:35:21 +08:00
2015-01-31 23:56:53 -08:00
mutex_unlock ( & vhost_scsi_mutex ) ;
2013-04-25 15:35:21 +08:00
2012-07-18 14:31:32 -07:00
return 0 ;
}
2015-01-31 23:56:53 -08:00
static void vhost_scsi_port_unlink ( struct se_portal_group * se_tpg ,
2013-05-06 16:38:27 +08:00
struct se_lun * lun )
2012-07-18 14:31:32 -07:00
{
2015-01-31 23:56:53 -08:00
struct vhost_scsi_tpg * tpg = container_of ( se_tpg ,
struct vhost_scsi_tpg , se_tpg ) ;
2012-07-18 14:31:32 -07:00
2015-01-31 23:56:53 -08:00
mutex_lock ( & vhost_scsi_mutex ) ;
2013-04-25 15:35:21 +08:00
2013-05-06 16:38:28 +08:00
mutex_lock ( & tpg - > tv_tpg_mutex ) ;
tpg - > tv_tpg_port_count - - ;
mutex_unlock ( & tpg - > tv_tpg_mutex ) ;
2013-04-25 15:35:21 +08:00
2015-01-31 23:56:53 -08:00
vhost_scsi_hotunplug ( tpg , lun ) ;
2013-04-25 15:35:21 +08:00
2015-01-31 23:56:53 -08:00
mutex_unlock ( & vhost_scsi_mutex ) ;
2012-07-18 14:31:32 -07:00
}
2015-01-31 23:56:53 -08:00
static void vhost_scsi_free_cmd_map_res ( struct vhost_scsi_nexus * nexus ,
2013-06-21 14:32:04 -07:00
struct se_session * se_sess )
{
2015-01-31 23:56:53 -08:00
struct vhost_scsi_cmd * tv_cmd ;
2013-06-21 14:32:04 -07:00
unsigned int i ;
if ( ! se_sess - > sess_cmd_map )
return ;
2015-01-31 23:56:53 -08:00
for ( i = 0 ; i < VHOST_SCSI_DEFAULT_TAGS ; i + + ) {
tv_cmd = & ( ( struct vhost_scsi_cmd * ) se_sess - > sess_cmd_map ) [ i ] ;
2013-06-21 14:32:04 -07:00
kfree ( tv_cmd - > tvc_sgl ) ;
2014-02-22 18:08:24 -08:00
kfree ( tv_cmd - > tvc_prot_sgl ) ;
2013-06-21 14:32:04 -07:00
kfree ( tv_cmd - > tvc_upages ) ;
}
}
2015-03-28 00:03:51 -07:00
/*
 * configfs store for the per-TPG fabric_prot_type attribute.
 * Accepts only the values 0, 1 and 3; anything else is rejected.
 */
static ssize_t vhost_scsi_tpg_attrib_store_fabric_prot_type(
		struct se_portal_group *se_tpg,
		const char *page,
		size_t count)
{
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);
	unsigned long val;
	int ret = kstrtoul(page, 0, &val);

	if (ret) {
		pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret);
		return ret;
	}
	switch (val) {
	case 0:
	case 1:
	case 3:
		break;
	default:
		pr_err("Invalid vhost_scsi fabric_prot_type: %lu\n", val);
		return -EINVAL;
	}
	tpg->tv_fabric_prot_type = val;

	return count;
}
static ssize_t vhost_scsi_tpg_attrib_show_fabric_prot_type (
struct se_portal_group * se_tpg ,
char * page )
{
struct vhost_scsi_tpg * tpg = container_of ( se_tpg ,
struct vhost_scsi_tpg , se_tpg ) ;
return sprintf ( page , " %d \n " , tpg - > tv_fabric_prot_type ) ;
}
/* Generate the configfs attribute wiring for fabric_prot_type. */
TF_TPG_ATTRIB_ATTR(vhost_scsi, fabric_prot_type, S_IRUGO | S_IWUSR);

static struct configfs_attribute *vhost_scsi_tpg_attrib_attrs[] = {
	&vhost_scsi_tpg_attrib_fabric_prot_type.attr,
	NULL,
};
2015-01-31 23:56:53 -08:00
static int vhost_scsi_make_nexus ( struct vhost_scsi_tpg * tpg ,
2013-05-06 16:38:27 +08:00
const char * name )
2012-07-18 14:31:32 -07:00
{
struct se_portal_group * se_tpg ;
2013-06-21 14:32:04 -07:00
struct se_session * se_sess ;
2015-01-31 23:56:53 -08:00
struct vhost_scsi_nexus * tv_nexus ;
struct vhost_scsi_cmd * tv_cmd ;
2013-06-21 14:32:04 -07:00
unsigned int i ;
2012-07-18 14:31:32 -07:00
2013-05-06 16:38:28 +08:00
mutex_lock ( & tpg - > tv_tpg_mutex ) ;
if ( tpg - > tpg_nexus ) {
mutex_unlock ( & tpg - > tv_tpg_mutex ) ;
pr_debug ( " tpg->tpg_nexus already exists \n " ) ;
2012-07-18 14:31:32 -07:00
return - EEXIST ;
}
2013-05-06 16:38:28 +08:00
se_tpg = & tpg - > se_tpg ;
2012-07-18 14:31:32 -07:00
2015-01-31 23:56:53 -08:00
tv_nexus = kzalloc ( sizeof ( struct vhost_scsi_nexus ) , GFP_KERNEL ) ;
2012-07-18 14:31:32 -07:00
if ( ! tv_nexus ) {
2013-05-06 16:38:28 +08:00
mutex_unlock ( & tpg - > tv_tpg_mutex ) ;
2015-01-31 23:56:53 -08:00
pr_err ( " Unable to allocate struct vhost_scsi_nexus \n " ) ;
2012-07-18 14:31:32 -07:00
return - ENOMEM ;
}
/*
2013-06-07 17:47:46 -07:00
* Initialize the struct se_session pointer and setup tagpool
2015-01-31 23:56:53 -08:00
* for struct vhost_scsi_cmd descriptors
2012-07-18 14:31:32 -07:00
*/
2013-06-07 17:47:46 -07:00
tv_nexus - > tvn_se_sess = transport_init_session_tags (
2015-01-31 23:56:53 -08:00
VHOST_SCSI_DEFAULT_TAGS ,
sizeof ( struct vhost_scsi_cmd ) ,
2014-02-22 18:22:31 -08:00
TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS ) ;
2012-07-18 14:31:32 -07:00
if ( IS_ERR ( tv_nexus - > tvn_se_sess ) ) {
2013-05-06 16:38:28 +08:00
mutex_unlock ( & tpg - > tv_tpg_mutex ) ;
2012-07-18 14:31:32 -07:00
kfree ( tv_nexus ) ;
return - ENOMEM ;
}
2013-06-21 14:32:04 -07:00
se_sess = tv_nexus - > tvn_se_sess ;
2015-01-31 23:56:53 -08:00
for ( i = 0 ; i < VHOST_SCSI_DEFAULT_TAGS ; i + + ) {
tv_cmd = & ( ( struct vhost_scsi_cmd * ) se_sess - > sess_cmd_map ) [ i ] ;
2013-06-21 14:32:04 -07:00
tv_cmd - > tvc_sgl = kzalloc ( sizeof ( struct scatterlist ) *
2015-01-31 23:56:53 -08:00
VHOST_SCSI_PREALLOC_SGLS , GFP_KERNEL ) ;
2013-06-21 14:32:04 -07:00
if ( ! tv_cmd - > tvc_sgl ) {
mutex_unlock ( & tpg - > tv_tpg_mutex ) ;
pr_err ( " Unable to allocate tv_cmd->tvc_sgl \n " ) ;
goto out ;
}
tv_cmd - > tvc_upages = kzalloc ( sizeof ( struct page * ) *
2015-01-31 23:56:53 -08:00
VHOST_SCSI_PREALLOC_UPAGES , GFP_KERNEL ) ;
2013-06-21 14:32:04 -07:00
if ( ! tv_cmd - > tvc_upages ) {
mutex_unlock ( & tpg - > tv_tpg_mutex ) ;
pr_err ( " Unable to allocate tv_cmd->tvc_upages \n " ) ;
goto out ;
}
2014-02-22 18:08:24 -08:00
tv_cmd - > tvc_prot_sgl = kzalloc ( sizeof ( struct scatterlist ) *
2015-01-31 23:56:53 -08:00
VHOST_SCSI_PREALLOC_PROT_SGLS , GFP_KERNEL ) ;
2014-02-22 18:08:24 -08:00
if ( ! tv_cmd - > tvc_prot_sgl ) {
mutex_unlock ( & tpg - > tv_tpg_mutex ) ;
pr_err ( " Unable to allocate tv_cmd->tvc_prot_sgl \n " ) ;
goto out ;
}
2013-06-21 14:32:04 -07:00
}
2012-07-18 14:31:32 -07:00
/*
* Since we are running in ' demo mode ' this call with generate a
2015-01-31 23:56:53 -08:00
* struct se_node_acl for the vhost_scsi struct se_portal_group with
2012-07-18 14:31:32 -07:00
* the SCSI Initiator port name of the passed configfs group ' name ' .
*/
tv_nexus - > tvn_se_sess - > se_node_acl = core_tpg_check_initiator_node_acl (
se_tpg , ( unsigned char * ) name ) ;
if ( ! tv_nexus - > tvn_se_sess - > se_node_acl ) {
2013-05-06 16:38:28 +08:00
mutex_unlock ( & tpg - > tv_tpg_mutex ) ;
2012-07-18 14:31:32 -07:00
pr_debug ( " core_tpg_check_initiator_node_acl() failed "
" for %s \n " , name ) ;
2013-06-21 14:32:04 -07:00
goto out ;
2012-07-18 14:31:32 -07:00
}
/*
2015-02-12 11:48:49 +01:00
* Now register the TCM vhost virtual I_T Nexus as active .
2012-07-18 14:31:32 -07:00
*/
2015-02-12 11:48:49 +01:00
transport_register_session ( se_tpg , tv_nexus - > tvn_se_sess - > se_node_acl ,
2012-07-18 14:31:32 -07:00
tv_nexus - > tvn_se_sess , tv_nexus ) ;
2013-05-06 16:38:28 +08:00
tpg - > tpg_nexus = tv_nexus ;
2012-07-18 14:31:32 -07:00
2013-05-06 16:38:28 +08:00
mutex_unlock ( & tpg - > tv_tpg_mutex ) ;
2012-07-18 14:31:32 -07:00
return 0 ;
2013-06-21 14:32:04 -07:00
out :
2015-01-31 23:56:53 -08:00
vhost_scsi_free_cmd_map_res ( tv_nexus , se_sess ) ;
2013-06-21 14:32:04 -07:00
transport_free_session ( se_sess ) ;
kfree ( tv_nexus ) ;
return - ENOMEM ;
2012-07-18 14:31:32 -07:00
}
2015-01-31 23:56:53 -08:00
static int vhost_scsi_drop_nexus ( struct vhost_scsi_tpg * tpg )
2012-07-18 14:31:32 -07:00
{
struct se_session * se_sess ;
2015-01-31 23:56:53 -08:00
struct vhost_scsi_nexus * tv_nexus ;
2012-07-18 14:31:32 -07:00
mutex_lock ( & tpg - > tv_tpg_mutex ) ;
tv_nexus = tpg - > tpg_nexus ;
if ( ! tv_nexus ) {
mutex_unlock ( & tpg - > tv_tpg_mutex ) ;
return - ENODEV ;
}
se_sess = tv_nexus - > tvn_se_sess ;
if ( ! se_sess ) {
mutex_unlock ( & tpg - > tv_tpg_mutex ) ;
return - ENODEV ;
}
2012-07-30 13:30:00 -07:00
if ( tpg - > tv_tpg_port_count ! = 0 ) {
2012-07-18 14:31:32 -07:00
mutex_unlock ( & tpg - > tv_tpg_mutex ) ;
2012-07-30 13:30:00 -07:00
pr_err ( " Unable to remove TCM_vhost I_T Nexus with "
2012-07-18 14:31:32 -07:00
" active TPG port count: %d \n " ,
2012-07-30 13:30:00 -07:00
tpg - > tv_tpg_port_count ) ;
return - EBUSY ;
2012-07-18 14:31:32 -07:00
}
2012-07-30 13:30:00 -07:00
if ( tpg - > tv_tpg_vhost_count ! = 0 ) {
2012-07-18 14:31:32 -07:00
mutex_unlock ( & tpg - > tv_tpg_mutex ) ;
2012-07-30 13:30:00 -07:00
pr_err ( " Unable to remove TCM_vhost I_T Nexus with "
2012-07-18 14:31:32 -07:00
" active TPG vhost count: %d \n " ,
2012-07-30 13:30:00 -07:00
tpg - > tv_tpg_vhost_count ) ;
return - EBUSY ;
2012-07-18 14:31:32 -07:00
}
2012-07-30 13:30:00 -07:00
pr_debug ( " TCM_vhost_ConfigFS: Removing I_T Nexus to emulated "
2015-01-31 23:56:53 -08:00
" %s Initiator Port: %s \n " , vhost_scsi_dump_proto_id ( tpg - > tport ) ,
2012-07-18 14:31:32 -07:00
tv_nexus - > tvn_se_sess - > se_node_acl - > initiatorname ) ;
2013-06-21 14:32:04 -07:00
2015-01-31 23:56:53 -08:00
vhost_scsi_free_cmd_map_res ( tv_nexus , se_sess ) ;
2012-07-18 14:31:32 -07:00
/*
2012-07-30 13:30:00 -07:00
* Release the SCSI I_T Nexus to the emulated vhost Target Port
2012-07-18 14:31:32 -07:00
*/
transport_deregister_session ( tv_nexus - > tvn_se_sess ) ;
tpg - > tpg_nexus = NULL ;
mutex_unlock ( & tpg - > tv_tpg_mutex ) ;
kfree ( tv_nexus ) ;
return 0 ;
}
2015-01-31 23:56:53 -08:00
static ssize_t vhost_scsi_tpg_show_nexus ( struct se_portal_group * se_tpg ,
2013-05-06 16:38:27 +08:00
char * page )
2012-07-18 14:31:32 -07:00
{
2015-01-31 23:56:53 -08:00
struct vhost_scsi_tpg * tpg = container_of ( se_tpg ,
struct vhost_scsi_tpg , se_tpg ) ;
struct vhost_scsi_nexus * tv_nexus ;
2012-07-18 14:31:32 -07:00
ssize_t ret ;
2013-05-06 16:38:28 +08:00
mutex_lock ( & tpg - > tv_tpg_mutex ) ;
tv_nexus = tpg - > tpg_nexus ;
2012-07-18 14:31:32 -07:00
if ( ! tv_nexus ) {
2013-05-06 16:38:28 +08:00
mutex_unlock ( & tpg - > tv_tpg_mutex ) ;
2012-07-18 14:31:32 -07:00
return - ENODEV ;
}
ret = snprintf ( page , PAGE_SIZE , " %s \n " ,
tv_nexus - > tvn_se_sess - > se_node_acl - > initiatorname ) ;
2013-05-06 16:38:28 +08:00
mutex_unlock ( & tpg - > tv_tpg_mutex ) ;
2012-07-18 14:31:32 -07:00
return ret ;
}
2015-01-31 23:56:53 -08:00
/*
 * configfs store for the TPG 'nexus' file.  Writing "NULL" drops the
 * active nexus; otherwise the buffer must carry an initiator WWN whose
 * prefix ("naa.", "fc." or "iqn.") matches the tport's protocol, and a
 * new nexus is created for it.
 */
static ssize_t vhost_scsi_tpg_store_nexus(struct se_portal_group *se_tpg,
					  const char *page,
					  size_t count)
{
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);
	struct vhost_scsi_tport *tport_wwn = tpg->tport;
	unsigned char i_port[VHOST_SCSI_NAMELEN], *ptr, *port_ptr;
	int ret;
	/*
	 * Shutdown the active I_T nexus if 'NULL' is passed..
	 */
	if (!strncmp(page, "NULL", 4)) {
		ret = vhost_scsi_drop_nexus(tpg);
		return (!ret) ? count : ret;
	}
	/*
	 * Otherwise make sure the passed virtual Initiator port WWN matches
	 * the fabric protocol_id set in vhost_scsi_make_tport(), and call
	 * vhost_scsi_make_nexus().
	 */
	if (strlen(page) >= VHOST_SCSI_NAMELEN) {
		pr_err("Emulated NAA Sas Address: %s, exceeds"
				" max: %d\n", page, VHOST_SCSI_NAMELEN);
		return -EINVAL;
	}
	snprintf(&i_port[0], VHOST_SCSI_NAMELEN, "%s", page);

	ptr = strstr(i_port, "naa.");
	if (ptr) {
		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) {
			pr_err("Passed SAS Initiator Port %s does not"
				" match target port protoid: %s\n", i_port,
				vhost_scsi_dump_proto_id(tport_wwn));
			return -EINVAL;
		}
		port_ptr = &i_port[0];
		goto check_newline;
	}
	ptr = strstr(i_port, "fc.");
	if (ptr) {
		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) {
			pr_err("Passed FCP Initiator Port %s does not"
				" match target port protoid: %s\n", i_port,
				vhost_scsi_dump_proto_id(tport_wwn));
			return -EINVAL;
		}
		port_ptr = &i_port[3]; /* Skip over "fc." */
		goto check_newline;
	}
	ptr = strstr(i_port, "iqn.");
	if (ptr) {
		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) {
			pr_err("Passed iSCSI Initiator Port %s does not"
				" match target port protoid: %s\n", i_port,
				vhost_scsi_dump_proto_id(tport_wwn));
			return -EINVAL;
		}
		port_ptr = &i_port[0];
		goto check_newline;
	}
	pr_err("Unable to locate prefix for emulated Initiator Port:"
			" %s\n", i_port);
	return -EINVAL;
	/*
	 * Clear any trailing newline for the NAA WWN
	 */
check_newline:
	if (i_port[strlen(i_port) - 1] == '\n')
		i_port[strlen(i_port) - 1] = '\0';

	ret = vhost_scsi_make_nexus(tpg, port_ptr);
	if (ret < 0)
		return ret;

	return count;
}
2015-01-31 23:56:53 -08:00
TF_TPG_BASE_ATTR ( vhost_scsi , nexus , S_IRUGO | S_IWUSR ) ;
2012-07-18 14:31:32 -07:00
2015-01-31 23:56:53 -08:00
static struct configfs_attribute * vhost_scsi_tpg_attrs [ ] = {
& vhost_scsi_tpg_nexus . attr ,
2012-07-18 14:31:32 -07:00
NULL ,
} ;
2013-05-06 16:38:27 +08:00
static struct se_portal_group *
2015-01-31 23:56:53 -08:00
vhost_scsi_make_tpg ( struct se_wwn * wwn ,
2013-05-06 16:38:27 +08:00
struct config_group * group ,
const char * name )
2012-07-18 14:31:32 -07:00
{
2015-01-31 23:56:53 -08:00
struct vhost_scsi_tport * tport = container_of ( wwn ,
struct vhost_scsi_tport , tport_wwn ) ;
2012-07-18 14:31:32 -07:00
2015-01-31 23:56:53 -08:00
struct vhost_scsi_tpg * tpg ;
2015-02-05 10:37:33 +03:00
u16 tpgt ;
2012-07-18 14:31:32 -07:00
int ret ;
if ( strstr ( name , " tpgt_ " ) ! = name )
return ERR_PTR ( - EINVAL ) ;
2015-02-05 10:37:33 +03:00
if ( kstrtou16 ( name + 5 , 10 , & tpgt ) | | tpgt > = VHOST_SCSI_MAX_TARGET )
2012-07-18 14:31:32 -07:00
return ERR_PTR ( - EINVAL ) ;
2015-01-31 23:56:53 -08:00
tpg = kzalloc ( sizeof ( struct vhost_scsi_tpg ) , GFP_KERNEL ) ;
2012-07-18 14:31:32 -07:00
if ( ! tpg ) {
2015-01-31 23:56:53 -08:00
pr_err ( " Unable to allocate struct vhost_scsi_tpg " ) ;
2012-07-18 14:31:32 -07:00
return ERR_PTR ( - ENOMEM ) ;
}
mutex_init ( & tpg - > tv_tpg_mutex ) ;
INIT_LIST_HEAD ( & tpg - > tv_tpg_list ) ;
tpg - > tport = tport ;
tpg - > tport_tpgt = tpgt ;
2015-05-01 17:47:56 +02:00
ret = core_tpg_register ( & vhost_scsi_ops , wwn , & tpg - > se_tpg ,
tport - > tport_proto_id ) ;
2012-07-18 14:31:32 -07:00
if ( ret < 0 ) {
kfree ( tpg ) ;
return NULL ;
}
2015-01-31 23:56:53 -08:00
mutex_lock ( & vhost_scsi_mutex ) ;
list_add_tail ( & tpg - > tv_tpg_list , & vhost_scsi_list ) ;
mutex_unlock ( & vhost_scsi_mutex ) ;
2012-07-18 14:31:32 -07:00
return & tpg - > se_tpg ;
}
2015-01-31 23:56:53 -08:00
static void vhost_scsi_drop_tpg ( struct se_portal_group * se_tpg )
2012-07-18 14:31:32 -07:00
{
2015-01-31 23:56:53 -08:00
struct vhost_scsi_tpg * tpg = container_of ( se_tpg ,
struct vhost_scsi_tpg , se_tpg ) ;
2012-07-18 14:31:32 -07:00
2015-01-31 23:56:53 -08:00
mutex_lock ( & vhost_scsi_mutex ) ;
2012-07-18 14:31:32 -07:00
list_del ( & tpg - > tv_tpg_list ) ;
2015-01-31 23:56:53 -08:00
mutex_unlock ( & vhost_scsi_mutex ) ;
2012-07-18 14:31:32 -07:00
/*
2012-07-30 13:30:00 -07:00
* Release the virtual I_T Nexus for this vhost TPG
2012-07-18 14:31:32 -07:00
*/
2015-01-31 23:56:53 -08:00
vhost_scsi_drop_nexus ( tpg ) ;
2012-07-18 14:31:32 -07:00
/*
* Deregister the se_tpg from TCM . .
*/
core_tpg_deregister ( se_tpg ) ;
kfree ( tpg ) ;
}
2013-05-06 16:38:27 +08:00
static struct se_wwn *
2015-01-31 23:56:53 -08:00
vhost_scsi_make_tport ( struct target_fabric_configfs * tf ,
2013-05-06 16:38:27 +08:00
struct config_group * group ,
const char * name )
2012-07-18 14:31:32 -07:00
{
2015-01-31 23:56:53 -08:00
struct vhost_scsi_tport * tport ;
2012-07-18 14:31:32 -07:00
char * ptr ;
u64 wwpn = 0 ;
int off = 0 ;
2015-01-31 23:56:53 -08:00
/* if (vhost_scsi_parse_wwn(name, &wwpn, 1) < 0)
2012-07-18 14:31:32 -07:00
return ERR_PTR ( - EINVAL ) ; */
2015-01-31 23:56:53 -08:00
tport = kzalloc ( sizeof ( struct vhost_scsi_tport ) , GFP_KERNEL ) ;
2012-07-18 14:31:32 -07:00
if ( ! tport ) {
2015-01-31 23:56:53 -08:00
pr_err ( " Unable to allocate struct vhost_scsi_tport " ) ;
2012-07-18 14:31:32 -07:00
return ERR_PTR ( - ENOMEM ) ;
}
tport - > tport_wwpn = wwpn ;
/*
* Determine the emulated Protocol Identifier and Target Port Name
* based on the incoming configfs directory name .
*/
ptr = strstr ( name , " naa. " ) ;
if ( ptr ) {
tport - > tport_proto_id = SCSI_PROTOCOL_SAS ;
goto check_len ;
}
ptr = strstr ( name , " fc. " ) ;
if ( ptr ) {
tport - > tport_proto_id = SCSI_PROTOCOL_FCP ;
off = 3 ; /* Skip over "fc." */
goto check_len ;
}
ptr = strstr ( name , " iqn. " ) ;
if ( ptr ) {
tport - > tport_proto_id = SCSI_PROTOCOL_ISCSI ;
goto check_len ;
}
pr_err ( " Unable to locate prefix for emulated Target Port: "
" %s \n " , name ) ;
kfree ( tport ) ;
return ERR_PTR ( - EINVAL ) ;
check_len :
2015-01-31 23:56:53 -08:00
if ( strlen ( name ) > = VHOST_SCSI_NAMELEN ) {
2012-07-18 14:31:32 -07:00
pr_err ( " Emulated %s Address: %s, exceeds "
2015-01-31 23:56:53 -08:00
" max: %d \n " , name , vhost_scsi_dump_proto_id ( tport ) ,
VHOST_SCSI_NAMELEN ) ;
2012-07-18 14:31:32 -07:00
kfree ( tport ) ;
return ERR_PTR ( - EINVAL ) ;
}
2015-01-31 23:56:53 -08:00
snprintf ( & tport - > tport_name [ 0 ] , VHOST_SCSI_NAMELEN , " %s " , & name [ off ] ) ;
2012-07-18 14:31:32 -07:00
pr_debug ( " TCM_VHost_ConfigFS: Allocated emulated Target "
2015-01-31 23:56:53 -08:00
" %s Address: %s \n " , vhost_scsi_dump_proto_id ( tport ) , name ) ;
2012-07-18 14:31:32 -07:00
return & tport - > tport_wwn ;
}
2015-01-31 23:56:53 -08:00
static void vhost_scsi_drop_tport ( struct se_wwn * wwn )
2012-07-18 14:31:32 -07:00
{
2015-01-31 23:56:53 -08:00
struct vhost_scsi_tport * tport = container_of ( wwn ,
struct vhost_scsi_tport , tport_wwn ) ;
2012-07-18 14:31:32 -07:00
pr_debug ( " TCM_VHost_ConfigFS: Deallocating emulated Target "
2015-01-31 23:56:53 -08:00
" %s Address: %s \n " , vhost_scsi_dump_proto_id ( tport ) ,
2012-07-18 14:31:32 -07:00
tport - > tport_name ) ;
kfree ( tport ) ;
}
2013-05-06 16:38:27 +08:00
static ssize_t
2015-01-31 23:56:53 -08:00
vhost_scsi_wwn_show_attr_version ( struct target_fabric_configfs * tf ,
2013-05-06 16:38:27 +08:00
char * page )
2012-07-18 14:31:32 -07:00
{
return sprintf ( page , " TCM_VHOST fabric module %s on %s/%s "
2015-01-31 23:56:53 -08:00
" on " UTS_RELEASE " \n " , VHOST_SCSI_VERSION , utsname ( ) - > sysname ,
2012-07-18 14:31:32 -07:00
utsname ( ) - > machine ) ;
}
2015-01-31 23:56:53 -08:00
TF_WWN_ATTR_RO ( vhost_scsi , version ) ;
2012-07-18 14:31:32 -07:00
2015-01-31 23:56:53 -08:00
static struct configfs_attribute * vhost_scsi_wwn_attrs [ ] = {
& vhost_scsi_wwn_version . attr ,
2012-07-18 14:31:32 -07:00
NULL ,
} ;
2015-01-31 23:56:53 -08:00
static struct target_core_fabric_ops vhost_scsi_ops = {
2015-04-08 20:01:35 +02:00
. module = THIS_MODULE ,
. name = " vhost " ,
2015-01-31 23:56:53 -08:00
. get_fabric_name = vhost_scsi_get_fabric_name ,
. get_fabric_proto_ident = vhost_scsi_get_fabric_proto_ident ,
. tpg_get_wwn = vhost_scsi_get_fabric_wwn ,
. tpg_get_tag = vhost_scsi_get_tpgt ,
. tpg_get_pr_transport_id = vhost_scsi_get_pr_transport_id ,
. tpg_get_pr_transport_id_len = vhost_scsi_get_pr_transport_id_len ,
. tpg_parse_pr_out_transport_id = vhost_scsi_parse_pr_out_transport_id ,
. tpg_check_demo_mode = vhost_scsi_check_true ,
. tpg_check_demo_mode_cache = vhost_scsi_check_true ,
. tpg_check_demo_mode_write_protect = vhost_scsi_check_false ,
. tpg_check_prod_mode_write_protect = vhost_scsi_check_false ,
2015-03-28 00:03:51 -07:00
. tpg_check_prot_fabric_only = vhost_scsi_check_prot_fabric_only ,
2015-01-31 23:56:53 -08:00
. tpg_get_inst_index = vhost_scsi_tpg_get_inst_index ,
. release_cmd = vhost_scsi_release_cmd ,
2013-06-06 02:20:41 -07:00
. check_stop_free = vhost_scsi_check_stop_free ,
2015-01-31 23:56:53 -08:00
. shutdown_session = vhost_scsi_shutdown_session ,
. close_session = vhost_scsi_close_session ,
. sess_get_index = vhost_scsi_sess_get_index ,
2012-07-18 14:31:32 -07:00
. sess_get_initiator_sid = NULL ,
2015-01-31 23:56:53 -08:00
. write_pending = vhost_scsi_write_pending ,
. write_pending_status = vhost_scsi_write_pending_status ,
. set_default_node_attributes = vhost_scsi_set_default_node_attrs ,
. get_task_tag = vhost_scsi_get_task_tag ,
. get_cmd_state = vhost_scsi_get_cmd_state ,
. queue_data_in = vhost_scsi_queue_data_in ,
. queue_status = vhost_scsi_queue_status ,
. queue_tm_rsp = vhost_scsi_queue_tm_rsp ,
. aborted_task = vhost_scsi_aborted_task ,
2012-07-18 14:31:32 -07:00
/*
* Setup callers for generic logic in target_core_fabric_configfs . c
*/
2015-01-31 23:56:53 -08:00
. fabric_make_wwn = vhost_scsi_make_tport ,
. fabric_drop_wwn = vhost_scsi_drop_tport ,
. fabric_make_tpg = vhost_scsi_make_tpg ,
. fabric_drop_tpg = vhost_scsi_drop_tpg ,
. fabric_post_link = vhost_scsi_port_link ,
. fabric_pre_unlink = vhost_scsi_port_unlink ,
2015-04-08 20:01:35 +02:00
. tfc_wwn_attrs = vhost_scsi_wwn_attrs ,
. tfc_tpg_base_attrs = vhost_scsi_tpg_attrs ,
. tfc_tpg_attrib_attrs = vhost_scsi_tpg_attrib_attrs ,
2012-07-18 14:31:32 -07:00
} ;
2015-04-08 20:01:35 +02:00
/*
 * Module init: set up the driver in three steps and unwind in reverse
 * order on failure via goto labels.
 *
 * Returns 0 on success, -ENOMEM if the workqueue cannot be allocated,
 * or the negative error from misc-device / fabric-template registration.
 *
 * Note: the stray ';' that followed the closing brace has been removed
 * (it was a harmless but non-idiomatic empty declaration).
 */
static int __init vhost_scsi_init(void)
{
	int ret = -ENOMEM;

	pr_debug("TCM_VHOST fabric module %s on %s/%s"
		" on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
		utsname()->machine);

	/*
	 * Use our own dedicated workqueue for submitting I/O into
	 * target core to avoid contention within system_wq.
	 */
	vhost_scsi_workqueue = alloc_workqueue("vhost_scsi", 0, 0);
	if (!vhost_scsi_workqueue)
		goto out;

	/* Expose the /dev/vhost-scsi misc device to userspace. */
	ret = vhost_scsi_register();
	if (ret < 0)
		goto out_destroy_workqueue;

	/* Hook the fabric template into the target core. */
	ret = target_register_template(&vhost_scsi_ops);
	if (ret < 0)
		goto out_vhost_scsi_deregister;

	return 0;

out_vhost_scsi_deregister:
	vhost_scsi_deregister();
out_destroy_workqueue:
	destroy_workqueue(vhost_scsi_workqueue);
out:
	return ret;
}
2015-01-31 23:56:53 -08:00
static void vhost_scsi_exit ( void )
2012-07-18 14:31:32 -07:00
{
2015-04-08 20:01:35 +02:00
target_unregister_template ( & vhost_scsi_ops ) ;
2012-07-18 14:31:32 -07:00
vhost_scsi_deregister ( ) ;
2015-01-31 23:56:53 -08:00
destroy_workqueue ( vhost_scsi_workqueue ) ;
2012-07-18 14:31:32 -07:00
} ;
2013-05-02 03:52:59 +03:00
MODULE_DESCRIPTION("VHOST_SCSI series fabric driver");
/* Keep loading by the driver's historical "tcm_vhost" name working. */
MODULE_ALIAS("tcm_vhost");
MODULE_LICENSE("GPL");
module_init(vhost_scsi_init);
module_exit(vhost_scsi_exit);