// SPDX-License-Identifier: GPL-2.0+
/*******************************************************************************
 * Vhost kernel TCM fabric driver for virtio SCSI initiators
 *
 * (C) Copyright 2010-2013 Datera, Inc.
 * (C) Copyright 2010-2012 IBM Corp.
 *
 * Authors: Nicholas A. Bellinger <nab@daterainc.com>
 *          Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
 ****************************************************************************/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <generated/utsrelease.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/compat.h>
#include <linux/eventfd.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/miscdevice.h>
#include <asm/unaligned.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <linux/vhost.h>
#include <linux/virtio_scsi.h>
#include <linux/llist.h>
#include <linux/bitmap.h>

#include "vhost.h"

#define VHOST_SCSI_VERSION  "v0.1"
#define VHOST_SCSI_NAMELEN 256
#define VHOST_SCSI_MAX_CDB_SIZE 32
#define VHOST_SCSI_PREALLOC_SGLS 2048
#define VHOST_SCSI_PREALLOC_UPAGES 2048
#define VHOST_SCSI_PREALLOC_PROT_SGLS 2048

/* Max number of requests before requeueing the job.
 * Using this limit prevents one virtqueue from starving others with
 * requests.
 */
#define VHOST_SCSI_WEIGHT 256

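/*
 * The weight is consumed by the queue handlers below, which poll
 * descriptors in a loop of this form (the pattern used by
 * vhost_scsi_handle_vq() and vhost_scsi_ctl_handle_vq()):
 *
 *	do {
 *		ret = vhost_scsi_get_desc(vs, vq, &vc);
 *		...
 *	} while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
 *
 * After VHOST_SCSI_WEIGHT descriptors the handler bails out and the work
 * is requeued, giving the other virtqueues a chance to run.
 */
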
struct vhost_scsi_inflight {
	/* Wait for the flush operation to finish */
	struct completion comp;
	/* Refcount for the inflight reqs */
	struct kref kref;
};

struct vhost_scsi_cmd {
	/* Descriptor from vhost_get_vq_desc() for virt_queue segment */
	int tvc_vq_desc;
	/* virtio-scsi initiator task attribute */
	int tvc_task_attr;
	/* virtio-scsi response incoming iovecs */
	int tvc_in_iovs;
	/* virtio-scsi initiator data direction */
	enum dma_data_direction tvc_data_direction;
	/* Expected data transfer length from virtio-scsi header */
	u32 tvc_exp_data_len;
	/* The Tag from include/linux/virtio_scsi.h:struct virtio_scsi_cmd_req */
	u64 tvc_tag;
	/* The number of scatterlists associated with this cmd */
	u32 tvc_sgl_count;
	u32 tvc_prot_sgl_count;
	/* Saved unpacked SCSI LUN for vhost_scsi_target_queue_cmd() */
	u32 tvc_lun;
	/* Pointer to the SGL formatted memory from virtio-scsi */
	struct scatterlist *tvc_sgl;
	struct scatterlist *tvc_prot_sgl;
	struct page **tvc_upages;
	/* Pointer to response header iovec */
	struct iovec tvc_resp_iov;
	/* Pointer to vhost_scsi for our device */
	struct vhost_scsi *tvc_vhost;
	/* Pointer to vhost_virtqueue for the cmd */
	struct vhost_virtqueue *tvc_vq;
	/* Pointer to vhost nexus memory */
	struct vhost_scsi_nexus *tvc_nexus;
	/* The TCM I/O descriptor that is accessed via container_of() */
	struct se_cmd tvc_se_cmd;
	/* Copy of the incoming SCSI command descriptor block (CDB) */
	unsigned char tvc_cdb[VHOST_SCSI_MAX_CDB_SIZE];
	/* Sense buffer that will be mapped into outgoing status */
	unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER];
	/* Completed commands list, serviced from vhost worker thread */
	struct llist_node tvc_completion_list;
	/* Used to track inflight cmd */
	struct vhost_scsi_inflight *inflight;
};

struct vhost_scsi_nexus {
	/* Pointer to TCM session for I_T Nexus */
	struct se_session *tvn_se_sess;
};

struct vhost_scsi_tpg {
	/* Vhost port target portal group tag for TCM */
	u16 tport_tpgt;
	/* Used to track number of TPG Port/Lun Links wrt explicit I_T Nexus shutdown */
	int tv_tpg_port_count;
	/* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */
	int tv_tpg_vhost_count;
	/* Used for enabling T10-PI with legacy devices */
	int tv_fabric_prot_type;
	/* list for vhost_scsi_list */
	struct list_head tv_tpg_list;
	/* Used to protect access for tpg_nexus */
	struct mutex tv_tpg_mutex;
	/* Pointer to the TCM VHost I_T Nexus for this TPG endpoint */
	struct vhost_scsi_nexus *tpg_nexus;
	/* Pointer back to vhost_scsi_tport */
	struct vhost_scsi_tport *tport;
	/* Returned by vhost_scsi_make_tpg() */
	struct se_portal_group se_tpg;
	/* Pointer back to vhost_scsi, protected by tv_tpg_mutex */
	struct vhost_scsi *vhost_scsi;
	struct list_head tmf_queue;
};

struct vhost_scsi_tport {
	/* SCSI protocol the tport is providing */
	u8 tport_proto_id;
	/* Binary World Wide unique Port Name for Vhost Target port */
	u64 tport_wwpn;
	/* ASCII formatted WWPN for Vhost Target port */
	char tport_name[VHOST_SCSI_NAMELEN];
	/* Returned by vhost_scsi_make_tport() */
	struct se_wwn tport_wwn;
};

struct vhost_scsi_evt {
	/* event to be sent to guest */
	struct virtio_scsi_event event;
	/* event list, serviced from vhost worker thread */
	struct llist_node list;
};

enum {
	VHOST_SCSI_VQ_CTL = 0,
	VHOST_SCSI_VQ_EVT = 1,
	VHOST_SCSI_VQ_IO = 2,
};

/* Note: can't set VIRTIO_F_VERSION_1 yet, since that implies ANY_LAYOUT. */
enum {
	VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG) |
					       (1ULL << VIRTIO_SCSI_F_T10_PI)
};

#define VHOST_SCSI_MAX_TARGET	256
#define VHOST_SCSI_MAX_VQ	128
#define VHOST_SCSI_MAX_EVENT	128

struct vhost_scsi_virtqueue {
	struct vhost_virtqueue vq;
	/*
	 * Reference counting for inflight reqs, used for flush operation. At
	 * any time, one generation tracks newly submitted commands, while we
	 * wait for the other one to drain to 0.
	 */
	struct vhost_scsi_inflight inflights[2];
	/*
	 * Indicate current inflight in use, protected by vq->mutex.
	 * Writers must also take dev mutex and flush under it.
	 */
	int inflight_idx;
	struct vhost_scsi_cmd *scsi_cmds;
	struct sbitmap scsi_tags;
	int max_cmds;
};

struct vhost_scsi {
	/* Protected by vhost_scsi->dev.mutex */
	struct vhost_scsi_tpg **vs_tpg;
	char vs_vhost_wwpn[TRANSPORT_IQN_LEN];

	struct vhost_dev dev;
	struct vhost_scsi_virtqueue vqs[VHOST_SCSI_MAX_VQ];

	struct vhost_work vs_completion_work; /* cmd completion work item */
	struct llist_head vs_completion_list; /* cmd completion queue */

	struct vhost_work vs_event_work; /* evt injection work item */
	struct llist_head vs_event_list; /* evt injection queue */

	bool vs_events_missed; /* any missed events, protected by vq->mutex */
	int vs_events_nr; /* num of pending events, protected by vq->mutex */
};

struct vhost_scsi_tmf {
	struct vhost_work vwork;
	struct vhost_scsi_tpg *tpg;
	struct vhost_scsi *vhost;
	struct vhost_scsi_virtqueue *svq;
	struct list_head queue_entry;

	struct se_cmd se_cmd;
	u8 scsi_resp;
	struct vhost_scsi_inflight *inflight;
	struct iovec resp_iov;
	int in_iovs;
	int vq_desc;
};

/*
 * Context for processing request and control queue operations.
 */
struct vhost_scsi_ctx {
	int head;
	unsigned int out, in;
	size_t req_size, rsp_size;
	size_t out_size, in_size;
	u8 *target, *lunp;
	void *req;
	struct iov_iter out_iter;
};

/* Global mutex to protect vhost_scsi TPG list for vhost IOCTL access */
static DEFINE_MUTEX(vhost_scsi_mutex);
static LIST_HEAD(vhost_scsi_list);

static void vhost_scsi_done_inflight(struct kref *kref)
{
	struct vhost_scsi_inflight *inflight;

	inflight = container_of(kref, struct vhost_scsi_inflight, kref);
	complete(&inflight->comp);
}

static void vhost_scsi_init_inflight(struct vhost_scsi *vs,
				     struct vhost_scsi_inflight *old_inflight[])
{
	struct vhost_scsi_inflight *new_inflight;
	struct vhost_virtqueue *vq;
	int idx, i;

	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
		vq = &vs->vqs[i].vq;

		mutex_lock(&vq->mutex);

		/* store old inflight */
		idx = vs->vqs[i].inflight_idx;
		if (old_inflight)
			old_inflight[i] = &vs->vqs[i].inflights[idx];

		/* setup new inflight */
		vs->vqs[i].inflight_idx = idx ^ 1;
		new_inflight = &vs->vqs[i].inflights[idx ^ 1];
		kref_init(&new_inflight->kref);
		init_completion(&new_inflight->comp);

		mutex_unlock(&vq->mutex);
	}
}

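/*
 * The two inflights[] generations implement the flush handshake. A sketch
 * of how the flush path is expected to use this helper (cf.
 * vhost_scsi_flush(), outside this excerpt):
 *
 *	vhost_scsi_init_inflight(vs, old_inflight);	// swap generations
 *	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
 *		kref_put(&old_inflight[i]->kref, vhost_scsi_done_inflight);
 *	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
 *		wait_for_completion(&old_inflight[i]->comp);
 *
 * New commands take a reference on the new generation via
 * vhost_scsi_get_inflight(), so the wait only covers requests that were
 * already in flight when the generations were swapped.
 */
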
static struct vhost_scsi_inflight *
vhost_scsi_get_inflight(struct vhost_virtqueue *vq)
{
	struct vhost_scsi_inflight *inflight;
	struct vhost_scsi_virtqueue *svq;

	svq = container_of(vq, struct vhost_scsi_virtqueue, vq);
	inflight = &svq->inflights[svq->inflight_idx];
	kref_get(&inflight->kref);

	return inflight;
}

static void vhost_scsi_put_inflight(struct vhost_scsi_inflight *inflight)
{
	kref_put(&inflight->kref, vhost_scsi_done_inflight);
}

static int vhost_scsi_check_true(struct se_portal_group *se_tpg)
{
	return 1;
}

static int vhost_scsi_check_false(struct se_portal_group *se_tpg)
{
	return 0;
}

static char *vhost_scsi_get_fabric_wwn(struct se_portal_group *se_tpg)
{
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);
	struct vhost_scsi_tport *tport = tpg->tport;

	return &tport->tport_name[0];
}

static u16 vhost_scsi_get_tpgt(struct se_portal_group *se_tpg)
{
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);

	return tpg->tport_tpgt;
}

static int vhost_scsi_check_prot_fabric_only(struct se_portal_group *se_tpg)
{
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);

	return tpg->tv_fabric_prot_type;
}

static u32 vhost_scsi_tpg_get_inst_index(struct se_portal_group *se_tpg)
{
	return 1;
}

static void vhost_scsi_release_cmd_res(struct se_cmd *se_cmd)
{
	struct vhost_scsi_cmd *tv_cmd = container_of(se_cmd,
				struct vhost_scsi_cmd, tvc_se_cmd);
	struct vhost_scsi_virtqueue *svq = container_of(tv_cmd->tvc_vq,
				struct vhost_scsi_virtqueue, vq);
	struct vhost_scsi_inflight *inflight = tv_cmd->inflight;
	int i;

	if (tv_cmd->tvc_sgl_count) {
		for (i = 0; i < tv_cmd->tvc_sgl_count; i++)
			put_page(sg_page(&tv_cmd->tvc_sgl[i]));
	}
	if (tv_cmd->tvc_prot_sgl_count) {
		for (i = 0; i < tv_cmd->tvc_prot_sgl_count; i++)
			put_page(sg_page(&tv_cmd->tvc_prot_sgl[i]));
	}

	sbitmap_clear_bit(&svq->scsi_tags, se_cmd->map_tag);
	vhost_scsi_put_inflight(inflight);
}

static void vhost_scsi_release_tmf_res(struct vhost_scsi_tmf *tmf)
{
	struct vhost_scsi_tpg *tpg = tmf->tpg;
	struct vhost_scsi_inflight *inflight = tmf->inflight;

	mutex_lock(&tpg->tv_tpg_mutex);
	list_add_tail(&tmf->queue_entry, &tpg->tmf_queue);
	mutex_unlock(&tpg->tv_tpg_mutex);
	vhost_scsi_put_inflight(inflight);
}

static void vhost_scsi_release_cmd(struct se_cmd *se_cmd)
{
	if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) {
		struct vhost_scsi_tmf *tmf = container_of(se_cmd,
					struct vhost_scsi_tmf, se_cmd);

		vhost_work_queue(&tmf->vhost->dev, &tmf->vwork);
	} else {
		struct vhost_scsi_cmd *cmd = container_of(se_cmd,
					struct vhost_scsi_cmd, tvc_se_cmd);
		struct vhost_scsi *vs = cmd->tvc_vhost;

		llist_add(&cmd->tvc_completion_list, &vs->vs_completion_list);
		vhost_work_queue(&vs->dev, &vs->vs_completion_work);
	}
}

static u32 vhost_scsi_sess_get_index(struct se_session *se_sess)
{
	return 0;
}

static int vhost_scsi_write_pending(struct se_cmd *se_cmd)
{
	/* Go ahead and process the write immediately */
	target_execute_cmd(se_cmd);
	return 0;
}

static void vhost_scsi_set_default_node_attrs(struct se_node_acl *nacl)
{
	return;
}

static int vhost_scsi_get_cmd_state(struct se_cmd *se_cmd)
{
	return 0;
}

static int vhost_scsi_queue_data_in(struct se_cmd *se_cmd)
{
	transport_generic_free_cmd(se_cmd, 0);
	return 0;
}

static int vhost_scsi_queue_status(struct se_cmd *se_cmd)
{
	transport_generic_free_cmd(se_cmd, 0);
	return 0;
}

static void vhost_scsi_queue_tm_rsp(struct se_cmd *se_cmd)
{
	struct vhost_scsi_tmf *tmf = container_of(se_cmd, struct vhost_scsi_tmf,
						  se_cmd);

	tmf->scsi_resp = se_cmd->se_tmr_req->response;
	transport_generic_free_cmd(&tmf->se_cmd, 0);
}

static void vhost_scsi_aborted_task(struct se_cmd *se_cmd)
{
	return;
}

static void vhost_scsi_free_evt(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
{
	vs->vs_events_nr--;
	kfree(evt);
}

static struct vhost_scsi_evt *
vhost_scsi_allocate_evt(struct vhost_scsi *vs,
			u32 event, u32 reason)
{
	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
	struct vhost_scsi_evt *evt;

	if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
		vs->vs_events_missed = true;
		return NULL;
	}

	evt = kzalloc(sizeof(*evt), GFP_KERNEL);
	if (!evt) {
		vq_err(vq, "Failed to allocate vhost_scsi_evt\n");
		vs->vs_events_missed = true;
		return NULL;
	}

	evt->event.event = cpu_to_vhost32(vq, event);
	evt->event.reason = cpu_to_vhost32(vq, reason);
	vs->vs_events_nr++;

	return evt;
}

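/*
 * Event allocation is capped at VHOST_SCSI_MAX_EVENT outstanding events;
 * past that (or on allocation failure) only vs_events_missed is set, and
 * vhost_scsi_do_evt_work() below folds the loss into the next event it
 * manages to post:
 *
 *	event->event |= cpu_to_vhost32(vq, VIRTIO_SCSI_T_EVENTS_MISSED);
 *
 * so the guest learns that events were dropped without the host queueing
 * one struct per lost event.
 */
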
static int vhost_scsi_check_stop_free(struct se_cmd *se_cmd)
{
	return target_put_sess_cmd(se_cmd);
}

static void
vhost_scsi_do_evt_work(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
{
	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
	struct virtio_scsi_event *event = &evt->event;
	struct virtio_scsi_event __user *eventp;
	unsigned out, in;
	int head, ret;

	if (!vhost_vq_get_backend(vq)) {
		vs->vs_events_missed = true;
		return;
	}

again:
	vhost_disable_notify(&vs->dev, vq);
	head = vhost_get_vq_desc(vq, vq->iov,
			ARRAY_SIZE(vq->iov), &out, &in,
			NULL, NULL);
	if (head < 0) {
		vs->vs_events_missed = true;
		return;
	}
	if (head == vq->num) {
		if (vhost_enable_notify(&vs->dev, vq))
			goto again;
		vs->vs_events_missed = true;
		return;
	}

	if (vq->iov[out].iov_len != sizeof(struct virtio_scsi_event)) {
		vq_err(vq, "Expecting virtio_scsi_event, got %zu bytes\n",
				vq->iov[out].iov_len);
		vs->vs_events_missed = true;
		return;
	}

	if (vs->vs_events_missed) {
		event->event |= cpu_to_vhost32(vq, VIRTIO_SCSI_T_EVENTS_MISSED);
		vs->vs_events_missed = false;
	}

	eventp = vq->iov[out].iov_base;
	ret = __copy_to_user(eventp, event, sizeof(*event));
	if (!ret)
		vhost_add_used_and_signal(&vs->dev, vq, head, 0);
	else
		vq_err(vq, "Faulted on vhost_scsi_send_event\n");
}

static void vhost_scsi_evt_work(struct vhost_work *work)
{
	struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
					vs_event_work);
	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
	struct vhost_scsi_evt *evt, *t;
	struct llist_node *llnode;

	mutex_lock(&vq->mutex);
	llnode = llist_del_all(&vs->vs_event_list);
	llist_for_each_entry_safe(evt, t, llnode, list) {
		vhost_scsi_do_evt_work(vs, evt);
		vhost_scsi_free_evt(vs, evt);
	}
	mutex_unlock(&vq->mutex);
}

/* Fill in status and signal that we are done processing this command
 *
 * This is scheduled in the vhost work queue so we are called with the owner
 * process mm and can access the vring.
 */
static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
{
	struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
					vs_completion_work);
	DECLARE_BITMAP(signal, VHOST_SCSI_MAX_VQ);
	struct virtio_scsi_cmd_resp v_rsp;
	struct vhost_scsi_cmd *cmd, *t;
	struct llist_node *llnode;
	struct se_cmd *se_cmd;
	struct iov_iter iov_iter;
	int ret, vq;

	bitmap_zero(signal, VHOST_SCSI_MAX_VQ);
	llnode = llist_del_all(&vs->vs_completion_list);
	llist_for_each_entry_safe(cmd, t, llnode, tvc_completion_list) {
		se_cmd = &cmd->tvc_se_cmd;

		pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__,
			cmd, se_cmd->residual_count, se_cmd->scsi_status);

		memset(&v_rsp, 0, sizeof(v_rsp));
		v_rsp.resid = cpu_to_vhost32(cmd->tvc_vq, se_cmd->residual_count);
		/* TODO is status_qualifier field needed? */
		v_rsp.status = se_cmd->scsi_status;
		v_rsp.sense_len = cpu_to_vhost32(cmd->tvc_vq,
						 se_cmd->scsi_sense_length);
		memcpy(v_rsp.sense, cmd->tvc_sense_buf,
		       se_cmd->scsi_sense_length);

		iov_iter_init(&iov_iter, READ, &cmd->tvc_resp_iov,
			      cmd->tvc_in_iovs, sizeof(v_rsp));
		ret = copy_to_iter(&v_rsp, sizeof(v_rsp), &iov_iter);
		if (likely(ret == sizeof(v_rsp))) {
			struct vhost_scsi_virtqueue *q;
			vhost_add_used(cmd->tvc_vq, cmd->tvc_vq_desc, 0);
			q = container_of(cmd->tvc_vq, struct vhost_scsi_virtqueue, vq);
			vq = q - vs->vqs;
			__set_bit(vq, signal);
		} else
			pr_err("Faulted on virtio_scsi_cmd_resp\n");

		vhost_scsi_release_cmd_res(se_cmd);
	}

	vq = -1;
	while ((vq = find_next_bit(signal, VHOST_SCSI_MAX_VQ, vq + 1))
		< VHOST_SCSI_MAX_VQ)
		vhost_signal(&vs->dev, &vs->vqs[vq].vq);
}

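/*
 * The signal bitmap above batches guest notifications: each completed
 * command only sets the bit for its virtqueue, and vhost_signal() then runs
 * once per distinct virtqueue after the completion list is drained, rather
 * than once per command.
 */
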
static struct vhost_scsi_cmd *
vhost_scsi_get_cmd(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
		   unsigned char *cdb, u64 scsi_tag, u16 lun, u8 task_attr,
		   u32 exp_data_len, int data_direction)
{
	struct vhost_scsi_virtqueue *svq = container_of(vq,
					struct vhost_scsi_virtqueue, vq);
	struct vhost_scsi_cmd *cmd;
	struct vhost_scsi_nexus *tv_nexus;
	struct scatterlist *sg, *prot_sg;
	struct page **pages;
	int tag;

	tv_nexus = tpg->tpg_nexus;
	if (!tv_nexus) {
		pr_err("Unable to locate active struct vhost_scsi_nexus\n");
		return ERR_PTR(-EIO);
	}

	tag = sbitmap_get(&svq->scsi_tags);
	if (tag < 0) {
		pr_err("Unable to obtain tag for vhost_scsi_cmd\n");
		return ERR_PTR(-ENOMEM);
	}

	cmd = &svq->scsi_cmds[tag];
	sg = cmd->tvc_sgl;
	prot_sg = cmd->tvc_prot_sgl;
	pages = cmd->tvc_upages;
	memset(cmd, 0, sizeof(*cmd));
	cmd->tvc_sgl = sg;
	cmd->tvc_prot_sgl = prot_sg;
	cmd->tvc_upages = pages;
	cmd->tvc_se_cmd.map_tag = tag;
	cmd->tvc_tag = scsi_tag;
	cmd->tvc_lun = lun;
	cmd->tvc_task_attr = task_attr;
	cmd->tvc_exp_data_len = exp_data_len;
	cmd->tvc_data_direction = data_direction;
	cmd->tvc_nexus = tv_nexus;
	cmd->inflight = vhost_scsi_get_inflight(vq);

	memcpy(cmd->tvc_cdb, cdb, VHOST_SCSI_MAX_CDB_SIZE);

	return cmd;
}

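/*
 * Tag lifecycle: the tag taken from sbitmap_get() above indexes the per-vq
 * scsi_cmds[] array and is released in vhost_scsi_release_cmd_res() via
 * sbitmap_clear_bit(). The sgl/prot_sgl/upages pointers are saved across
 * the memset() because they point at per-tag buffers that are allocated up
 * front (outside this excerpt), not at per-command memory.
 */
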
/*
 * Map a user memory range into a scatterlist
 *
 * Returns the number of scatterlist entries used or -errno on error.
 */
static int
vhost_scsi_map_to_sgl(struct vhost_scsi_cmd *cmd,
		      struct iov_iter *iter,
		      struct scatterlist *sgl,
		      bool write)
{
	struct page **pages = cmd->tvc_upages;
	struct scatterlist *sg = sgl;
	ssize_t bytes;
	size_t offset;
	unsigned int npages = 0;

	bytes = iov_iter_get_pages(iter, pages, LONG_MAX,
				VHOST_SCSI_PREALLOC_UPAGES, &offset);
	/* No pages were pinned */
	if (bytes <= 0)
		return bytes < 0 ? bytes : -EFAULT;

	iov_iter_advance(iter, bytes);

	while (bytes) {
		unsigned n = min_t(unsigned, PAGE_SIZE - offset, bytes);
		sg_set_page(sg++, pages[npages++], n, offset);
		bytes -= n;
		offset = 0;
	}
	return npages;
}

static int
vhost_scsi_calc_sgls(struct iov_iter *iter, size_t bytes, int max_sgls)
{
	int sgl_count = 0;

	if (!iter || !iter->iov) {
		pr_err("%s: iter->iov is NULL, but expected bytes: %zu"
		       " present\n", __func__, bytes);
		return -EINVAL;
	}

	sgl_count = iov_iter_npages(iter, 0xffff);
	if (sgl_count > max_sgls) {
		pr_err("%s: requested sgl_count: %d exceeds pre-allocated"
		       " max_sgls: %d\n", __func__, sgl_count, max_sgls);
		return -EINVAL;
	}
	return sgl_count;
}

static int
vhost_scsi_iov_to_sgl(struct vhost_scsi_cmd *cmd, bool write,
		      struct iov_iter *iter,
		      struct scatterlist *sg, int sg_count)
{
	struct scatterlist *p = sg;
	int ret;

	while (iov_iter_count(iter)) {
		ret = vhost_scsi_map_to_sgl(cmd, iter, sg, write);
		if (ret < 0) {
			while (p < sg) {
				struct page *page = sg_page(p++);

				if (page)
					put_page(page);
			}
			return ret;
		}
		sg += ret;
	}
	return 0;
}

static int
vhost_scsi_mapal(struct vhost_scsi_cmd *cmd,
		 size_t prot_bytes, struct iov_iter *prot_iter,
		 size_t data_bytes, struct iov_iter *data_iter)
{
	int sgl_count, ret;
	bool write = (cmd->tvc_data_direction == DMA_FROM_DEVICE);

	if (prot_bytes) {
		sgl_count = vhost_scsi_calc_sgls(prot_iter, prot_bytes,
						 VHOST_SCSI_PREALLOC_PROT_SGLS);
		if (sgl_count < 0)
			return sgl_count;

		sg_init_table(cmd->tvc_prot_sgl, sgl_count);
		cmd->tvc_prot_sgl_count = sgl_count;
		pr_debug("%s prot_sg %p prot_sgl_count %u\n", __func__,
			 cmd->tvc_prot_sgl, cmd->tvc_prot_sgl_count);

		ret = vhost_scsi_iov_to_sgl(cmd, write, prot_iter,
					    cmd->tvc_prot_sgl,
					    cmd->tvc_prot_sgl_count);
		if (ret < 0) {
			cmd->tvc_prot_sgl_count = 0;
			return ret;
		}
	}
	sgl_count = vhost_scsi_calc_sgls(data_iter, data_bytes,
					 VHOST_SCSI_PREALLOC_SGLS);
	if (sgl_count < 0)
		return sgl_count;

	sg_init_table(cmd->tvc_sgl, sgl_count);
	cmd->tvc_sgl_count = sgl_count;
	pr_debug("%s data_sg %p data_sgl_count %u\n", __func__,
		 cmd->tvc_sgl, cmd->tvc_sgl_count);

	ret = vhost_scsi_iov_to_sgl(cmd, write, data_iter,
				    cmd->tvc_sgl, cmd->tvc_sgl_count);
	if (ret < 0) {
		cmd->tvc_sgl_count = 0;
		return ret;
	}
	return 0;
}

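/*
 * The mapping pipeline wired up above, for both the protection and data
 * payloads (a sketch):
 *
 *	sgl_count = vhost_scsi_calc_sgls(iter, bytes, max);	// bound check
 *	sg_init_table(sgl, sgl_count);				// init entries
 *	vhost_scsi_iov_to_sgl(cmd, write, iter, sgl, sgl_count); // pin + fill
 *
 * vhost_scsi_map_to_sgl() pins up to VHOST_SCSI_PREALLOC_UPAGES guest pages
 * per call and splits each pinned range on page boundaries, so one iovec
 * may consume several scatterlist entries.
 */
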
static int vhost_scsi_to_tcm_attr(int attr)
{
	switch (attr) {
	case VIRTIO_SCSI_S_SIMPLE:
		return TCM_SIMPLE_TAG;
	case VIRTIO_SCSI_S_ORDERED:
		return TCM_ORDERED_TAG;
	case VIRTIO_SCSI_S_HEAD:
		return TCM_HEAD_TAG;
	case VIRTIO_SCSI_S_ACA:
		return TCM_ACA_TAG;
	default:
		break;
	}
	return TCM_SIMPLE_TAG;
}

static void vhost_scsi_target_queue_cmd(struct vhost_scsi_cmd *cmd)
{
	struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
	struct vhost_scsi_nexus *tv_nexus;
	struct scatterlist *sg_ptr, *sg_prot_ptr = NULL;

	/* FIXME: BIDI operation */
	if (cmd->tvc_sgl_count) {
		sg_ptr = cmd->tvc_sgl;

		if (cmd->tvc_prot_sgl_count)
			sg_prot_ptr = cmd->tvc_prot_sgl;
		else
			se_cmd->prot_pto = true;
	} else {
		sg_ptr = NULL;
	}
	tv_nexus = cmd->tvc_nexus;

	se_cmd->tag = 0;
	target_init_cmd(se_cmd, tv_nexus->tvn_se_sess, &cmd->tvc_sense_buf[0],
			cmd->tvc_lun, cmd->tvc_exp_data_len,
			vhost_scsi_to_tcm_attr(cmd->tvc_task_attr),
			cmd->tvc_data_direction, TARGET_SCF_ACK_KREF);

	if (target_submit_prep(se_cmd, cmd->tvc_cdb, sg_ptr,
			       cmd->tvc_sgl_count, NULL, 0, sg_prot_ptr,
			       cmd->tvc_prot_sgl_count, GFP_KERNEL))
		return;

	target_queue_submission(se_cmd);
}

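/*
 * Note on the early return above (a hedged reading, based on the
 * target_init_cmd()/target_submit_prep()/target_queue_submission()
 * conversion this function uses): target_submit_prep() is expected to
 * complete the command itself (sending a check condition back to the
 * initiator and dropping the ACK_KREF reference) when it fails, so the
 * caller must not touch se_cmd after a nonzero return.
 */
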
static void
vhost_scsi_send_bad_target(struct vhost_scsi *vs,
			   struct vhost_virtqueue *vq,
			   int head, unsigned out)
{
	struct virtio_scsi_cmd_resp __user *resp;
	struct virtio_scsi_cmd_resp rsp;
	int ret;

	memset(&rsp, 0, sizeof(rsp));
	rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
	resp = vq->iov[out].iov_base;
	ret = __copy_to_user(resp, &rsp, sizeof(rsp));
	if (!ret)
		vhost_add_used_and_signal(&vs->dev, vq, head, 0);
	else
		pr_err("Faulted on virtio_scsi_cmd_resp\n");
}

static int
vhost_scsi_get_desc(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
		    struct vhost_scsi_ctx *vc)
{
	int ret = -ENXIO;

	vc->head = vhost_get_vq_desc(vq, vq->iov,
				     ARRAY_SIZE(vq->iov), &vc->out, &vc->in,
				     NULL, NULL);

	pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
		 vc->head, vc->out, vc->in);

	/* On error, stop handling until the next kick. */
	if (unlikely(vc->head < 0))
		goto done;

	/* Nothing new?  Wait for eventfd to tell us they refilled. */
	if (vc->head == vq->num) {
		if (unlikely(vhost_enable_notify(&vs->dev, vq))) {
			vhost_disable_notify(&vs->dev, vq);
			ret = -EAGAIN;
		}
		goto done;
	}

	/*
	 * Get the size of request and response buffers.
	 * FIXME: Not correct for BIDI operation
	 */
	vc->out_size = iov_length(vq->iov, vc->out);
	vc->in_size = iov_length(&vq->iov[vc->out], vc->in);

	/*
	 * Copy over the virtio-scsi request header, which for an
	 * ANY_LAYOUT enabled guest may span multiple iovecs, or a
	 * single iovec may contain both the header + outgoing
	 * WRITE payloads.
	 *
	 * copy_from_iter() will advance out_iter, so that it will
	 * point at the start of the outgoing WRITE payload, if
	 * DMA_TO_DEVICE is set.
	 */
	iov_iter_init(&vc->out_iter, WRITE, vq->iov, vc->out, vc->out_size);
	ret = 0;

done:
	return ret;
}

static int
vhost_scsi_chk_size(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc)
{
	if (unlikely(vc->in_size < vc->rsp_size)) {
		vq_err(vq,
		       "Response buf too small, need min %zu bytes got %zu",
		       vc->rsp_size, vc->in_size);
		return -EINVAL;
	} else if (unlikely(vc->out_size < vc->req_size)) {
		vq_err(vq,
		       "Request buf too small, need min %zu bytes got %zu",
		       vc->req_size, vc->out_size);
		return -EIO;
	}

	return 0;
}

static int
vhost_scsi_get_req(struct vhost_virtqueue *vq, struct vhost_scsi_ctx *vc,
		   struct vhost_scsi_tpg **tpgp)
{
	int ret = -EIO;

	if (unlikely(!copy_from_iter_full(vc->req, vc->req_size,
					  &vc->out_iter))) {
		vq_err(vq, "Faulted on copy_from_iter_full\n");
	} else if (unlikely(*vc->lunp != 1)) {
		/* virtio-scsi spec requires byte 0 of the lun to be 1 */
		vq_err(vq, "Illegal virtio-scsi lun: %u\n", *vc->lunp);
	} else {
		struct vhost_scsi_tpg **vs_tpg, *tpg;

		vs_tpg = vhost_vq_get_backend(vq);	/* validated at handler entry */

		tpg = READ_ONCE(vs_tpg[*vc->target]);
		if (unlikely(!tpg)) {
			vq_err(vq, "Target 0x%x does not exist\n", *vc->target);
		} else {
			if (tpgp)
				*tpgp = tpg;
			ret = 0;
		}
	}

	return ret;
}

static u16 vhost_buf_to_lun(u8 *lun_buf)
{
	return ((lun_buf[2] << 8) | lun_buf[3]) & 0x3FFF;
}

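/*
 * Sketch of the 8-byte virtio-scsi LUN field this helper unpacks (per the
 * virtio-scsi spec; lun_buf is the lun[] array from the request header):
 *
 *	lun_buf[0] = 1		// fixed, checked in vhost_scsi_get_req()
 *	lun_buf[1] = target	// index into the vs_tpg[] array
 *	lun_buf[2..3]		// 0x4000 | LUN, big endian (flat addressing)
 *
 * The 0x3FFF mask strips the flat-addressing bits, leaving the 14-bit LUN.
 */
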
static void
vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
{
	struct vhost_scsi_tpg **vs_tpg, *tpg;
	struct virtio_scsi_cmd_req v_req;
	struct virtio_scsi_cmd_req_pi v_req_pi;
	struct vhost_scsi_ctx vc;
	struct vhost_scsi_cmd *cmd;
	struct iov_iter in_iter, prot_iter, data_iter;
	u64 tag;
	u32 exp_data_len, data_direction;
	int ret, prot_bytes, c = 0;
	u16 lun;
	u8 task_attr;
	bool t10_pi = vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI);
	void *cdb;

	mutex_lock(&vq->mutex);
	/*
	 * We can handle the vq only after the endpoint is setup by calling the
	 * VHOST_SCSI_SET_ENDPOINT ioctl.
	 */
	vs_tpg = vhost_vq_get_backend(vq);
	if (!vs_tpg)
		goto out;

	memset(&vc, 0, sizeof(vc));
	vc.rsp_size = sizeof(struct virtio_scsi_cmd_resp);

	vhost_disable_notify(&vs->dev, vq);

	do {
		ret = vhost_scsi_get_desc(vs, vq, &vc);
		if (ret)
			goto err;

		/*
		 * Setup pointers and values based upon different virtio-scsi
		 * request header if T10_PI is enabled in KVM guest.
		 */
		if (t10_pi) {
			vc.req = &v_req_pi;
			vc.req_size = sizeof(v_req_pi);
			vc.lunp = &v_req_pi.lun[0];
			vc.target = &v_req_pi.lun[1];
		} else {
			vc.req = &v_req;
			vc.req_size = sizeof(v_req);
			vc.lunp = &v_req.lun[0];
			vc.target = &v_req.lun[1];
		}

		/*
		 * Validate the size of request and response buffers.
		 * Check for a sane response buffer so we can report
		 * early errors back to the guest.
		 */
		ret = vhost_scsi_chk_size(vq, &vc);
		if (ret)
			goto err;

		ret = vhost_scsi_get_req(vq, &vc, &tpg);
		if (ret)
			goto err;

		ret = -EIO;	/* bad target on any error from here on */

		/*
		 * Determine data_direction by calculating the total outgoing
		 * iovec sizes + incoming iovec sizes vs. virtio-scsi request +
		 * response headers respectively.
		 *
		 * For DMA_TO_DEVICE this is out_iter, which is already pointing
		 * to the right place.
		 *
		 * For DMA_FROM_DEVICE, the iovec will be just past the end
		 * of the virtio-scsi response header in either the same
		 * or immediately following iovec.
		 *
		 * Any associated T10_PI bytes for the outgoing / incoming
		 * payloads are included in calculation of exp_data_len here.
		 */
		prot_bytes = 0;
		if (vc.out_size > vc.req_size) {
			data_direction = DMA_TO_DEVICE;
			exp_data_len = vc.out_size - vc.req_size;
			data_iter = vc.out_iter;
		} else if (vc.in_size > vc.rsp_size) {
			data_direction = DMA_FROM_DEVICE;
			exp_data_len = vc.in_size - vc.rsp_size;

			iov_iter_init(&in_iter, READ, &vq->iov[vc.out], vc.in,
				      vc.rsp_size + exp_data_len);
			iov_iter_advance(&in_iter, vc.rsp_size);
			data_iter = in_iter;
		} else {
			data_direction = DMA_NONE;
			exp_data_len = 0;
		}
		/*
		 * If T10_PI header + payload is present, setup prot_iter values
		 * and recalculate data_iter for vhost_scsi_mapal() mapping to
		 * host scatterlists via get_user_pages_fast().
		 */
		if (t10_pi) {
			if (v_req_pi.pi_bytesout) {
				if (data_direction != DMA_TO_DEVICE) {
					vq_err(vq, "Received non zero pi_bytesout,"
						" but wrong data_direction\n");
					goto err;
				}
				prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesout);
			} else if (v_req_pi.pi_bytesin) {
				if (data_direction != DMA_FROM_DEVICE) {
					vq_err(vq, "Received non zero pi_bytesin,"
						" but wrong data_direction\n");
					goto err;
				}
				prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesin);
			}
			/*
			 * Set prot_iter to data_iter and truncate it to
			 * prot_bytes, and advance data_iter past any
			 * preceding prot_bytes that may be present.
			 *
			 * Also fix up the exp_data_len to reflect only the
			 * actual data payload length.
			 */
			if (prot_bytes) {
				exp_data_len -= prot_bytes;
				prot_iter = data_iter;
				iov_iter_truncate(&prot_iter, prot_bytes);
				iov_iter_advance(&data_iter, prot_bytes);
			}
			tag = vhost64_to_cpu(vq, v_req_pi.tag);
			task_attr = v_req_pi.task_attr;
			cdb = &v_req_pi.cdb[0];
			lun = vhost_buf_to_lun(v_req_pi.lun);
		} else {
			tag = vhost64_to_cpu(vq, v_req.tag);
			task_attr = v_req.task_attr;
			cdb = &v_req.cdb[0];
			lun = vhost_buf_to_lun(v_req.lun);
		}
		/*
		 * Check that the received CDB size does not exceed our
		 * hardcoded max for vhost-scsi, then get a pre-allocated
		 * cmd descriptor for the new virtio-scsi tag.
		 *
		 * TODO what if cdb was too small for varlen cdb header?
		 */
		if (unlikely(scsi_command_size(cdb) > VHOST_SCSI_MAX_CDB_SIZE)) {
			vq_err(vq, "Received SCSI CDB with command_size: %d that"
				" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
				scsi_command_size(cdb), VHOST_SCSI_MAX_CDB_SIZE);
			goto err;
		}
		cmd = vhost_scsi_get_cmd(vq, tpg, cdb, tag, lun, task_attr,
					 exp_data_len + prot_bytes,
					 data_direction);
		if (IS_ERR(cmd)) {
			vq_err(vq, "vhost_scsi_get_cmd failed %ld\n",
			       PTR_ERR(cmd));
			goto err;
		}
		cmd->tvc_vhost = vs;
		cmd->tvc_vq = vq;
		cmd->tvc_resp_iov = vq->iov[vc.out];
		cmd->tvc_in_iovs = vc.in;

		pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
			 cmd->tvc_cdb[0], cmd->tvc_lun);
		pr_debug("cmd: %p exp_data_len: %d, prot_bytes: %d data_direction:"
			 " %d\n", cmd, exp_data_len, prot_bytes, data_direction);

		if (data_direction != DMA_NONE) {
			if (unlikely(vhost_scsi_mapal(cmd, prot_bytes,
						      &prot_iter, exp_data_len,
						      &data_iter))) {
				vq_err(vq, "Failed to map iov to sgl\n");
				vhost_scsi_release_cmd_res(&cmd->tvc_se_cmd);
				goto err;
			}
		}
		/*
		 * Save the descriptor from vhost_get_vq_desc() to be used to
		 * complete the virtio-scsi request in TCM callback context via
		 * vhost_scsi_queue_data_in() and vhost_scsi_queue_status()
		 */
		cmd->tvc_vq_desc = vc.head;
		vhost_scsi_target_queue_cmd(cmd);
		ret = 0;
err:
		/*
		 * ENXIO:  No more requests, or read error, wait for next kick
		 * EINVAL: Invalid response buffer, drop the request
		 * EIO:    Respond with bad target
		 * EAGAIN: Pending request
		 */
		if (ret == -ENXIO)
			break;
		else if (ret == -EIO)
			vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out);
	} while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
out:
	mutex_unlock(&vq->mutex);
}

static void
vhost_scsi_send_tmf_resp(struct vhost_scsi *vs, struct vhost_virtqueue *vq,
			 int in_iovs, int vq_desc, struct iovec *resp_iov,
			 int tmf_resp_code)
{
	struct virtio_scsi_ctrl_tmf_resp rsp;
	struct iov_iter iov_iter;
	int ret;

	pr_debug("%s\n", __func__);
	memset(&rsp, 0, sizeof(rsp));
	rsp.response = tmf_resp_code;

	iov_iter_init(&iov_iter, READ, resp_iov, in_iovs, sizeof(rsp));

	ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter);
	if (likely(ret == sizeof(rsp)))
		vhost_add_used_and_signal(&vs->dev, vq, vq_desc, 0);
	else
		pr_err("Faulted on virtio_scsi_ctrl_tmf_resp\n");
}

static void vhost_scsi_tmf_resp_work(struct vhost_work *work)
{
	struct vhost_scsi_tmf *tmf = container_of(work, struct vhost_scsi_tmf,
						  vwork);
	int resp_code;

	if (tmf->scsi_resp == TMR_FUNCTION_COMPLETE)
		resp_code = VIRTIO_SCSI_S_FUNCTION_SUCCEEDED;
	else
		resp_code = VIRTIO_SCSI_S_FUNCTION_REJECTED;

	vhost_scsi_send_tmf_resp(tmf->vhost, &tmf->svq->vq, tmf->in_iovs,
				 tmf->vq_desc, &tmf->resp_iov, resp_code);
	vhost_scsi_release_tmf_res(tmf);
}

static void
vhost_scsi_handle_tmf(struct vhost_scsi *vs, struct vhost_scsi_tpg *tpg,
		      struct vhost_virtqueue *vq,
		      struct virtio_scsi_ctrl_tmf_req *vtmf,
		      struct vhost_scsi_ctx *vc)
{
	struct vhost_scsi_virtqueue *svq = container_of(vq,
					struct vhost_scsi_virtqueue, vq);
	struct vhost_scsi_tmf *tmf;

	if (vhost32_to_cpu(vq, vtmf->subtype) !=
	    VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET)
		goto send_reject;

	if (!tpg->tpg_nexus || !tpg->tpg_nexus->tvn_se_sess) {
		pr_err("Unable to locate active struct vhost_scsi_nexus for LUN RESET.\n");
		goto send_reject;
	}

	mutex_lock(&tpg->tv_tpg_mutex);
	if (list_empty(&tpg->tmf_queue)) {
		pr_err("Missing reserve TMF. Could not handle LUN RESET.\n");
		mutex_unlock(&tpg->tv_tpg_mutex);
		goto send_reject;
	}

	tmf = list_first_entry(&tpg->tmf_queue, struct vhost_scsi_tmf,
			       queue_entry);
	list_del_init(&tmf->queue_entry);
	mutex_unlock(&tpg->tv_tpg_mutex);

	tmf->tpg = tpg;
	tmf->vhost = vs;
	tmf->svq = svq;
	tmf->resp_iov = vq->iov[vc->out];
	tmf->vq_desc = vc->head;
	tmf->in_iovs = vc->in;
	tmf->inflight = vhost_scsi_get_inflight(vq);

	if (target_submit_tmr(&tmf->se_cmd, tpg->tpg_nexus->tvn_se_sess, NULL,
			      vhost_buf_to_lun(vtmf->lun), NULL,
			      TMR_LUN_RESET, GFP_KERNEL, 0,
			      TARGET_SCF_ACK_KREF) < 0) {
		vhost_scsi_release_tmf_res(tmf);
		goto send_reject;
	}

	return;

send_reject:
	vhost_scsi_send_tmf_resp(vs, vq, vc->in, vc->head, &vq->iov[vc->out],
				 VIRTIO_SCSI_S_FUNCTION_REJECTED);
}
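/*
 * Asynchronous notification (AN) queries and subscriptions are not
 * backed by any event machinery here; they are simply acknowledged
 * with VIRTIO_SCSI_S_OK and a zeroed event_actual mask.
 */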
static void
vhost_scsi_send_an_resp(struct vhost_scsi *vs,
			struct vhost_virtqueue *vq,
			struct vhost_scsi_ctx *vc)
{
	struct virtio_scsi_ctrl_an_resp rsp;
	struct iov_iter iov_iter;
	int ret;

	pr_debug("%s\n", __func__);
	memset(&rsp, 0, sizeof(rsp));	/* event_actual = 0 */
	rsp.response = VIRTIO_SCSI_S_OK;

	iov_iter_init(&iov_iter, READ, &vq->iov[vc->out], vc->in, sizeof(rsp));

	ret = copy_to_iter(&rsp, sizeof(rsp), &iov_iter);
	if (likely(ret == sizeof(rsp)))
		vhost_add_used_and_signal(&vs->dev, vq, vc->head, 0);
	else
		pr_err("Faulted on virtio_scsi_ctrl_an_resp\n");
}
static void
vhost_scsi_ctl_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
{
	struct vhost_scsi_tpg *tpg;
	union {
		__virtio32 type;
		struct virtio_scsi_ctrl_an_req an;
		struct virtio_scsi_ctrl_tmf_req tmf;
	} v_req;
	struct vhost_scsi_ctx vc;
	size_t typ_size;
	int ret, c = 0;

	mutex_lock(&vq->mutex);
	/*
	 * We can handle the vq only after the endpoint is setup by calling the
	 * VHOST_SCSI_SET_ENDPOINT ioctl.
	 */
	if (!vhost_vq_get_backend(vq))
		goto out;

	memset(&vc, 0, sizeof(vc));

	vhost_disable_notify(&vs->dev, vq);

	do {
		ret = vhost_scsi_get_desc(vs, vq, &vc);
		if (ret)
			goto err;

		/*
		 * Get the request type first in order to setup
		 * other parameters dependent on the type.
		 */
		vc.req = &v_req.type;
		typ_size = sizeof(v_req.type);

		if (unlikely(!copy_from_iter_full(vc.req, typ_size,
						  &vc.out_iter))) {
			vq_err(vq, "Faulted on copy_from_iter tmf type\n");
			/*
			 * The size of the response buffer depends on the
			 * request type and must be validated against it.
			 * Since the request type is not known, don't send
			 * a response.
			 */
			continue;
		}

		switch (vhost32_to_cpu(vq, v_req.type)) {
		case VIRTIO_SCSI_T_TMF:
			vc.req = &v_req.tmf;
			vc.req_size = sizeof(struct virtio_scsi_ctrl_tmf_req);
			vc.rsp_size = sizeof(struct virtio_scsi_ctrl_tmf_resp);
			vc.lunp = &v_req.tmf.lun[0];
			vc.target = &v_req.tmf.lun[1];
			break;
		case VIRTIO_SCSI_T_AN_QUERY:
		case VIRTIO_SCSI_T_AN_SUBSCRIBE:
			vc.req = &v_req.an;
			vc.req_size = sizeof(struct virtio_scsi_ctrl_an_req);
			vc.rsp_size = sizeof(struct virtio_scsi_ctrl_an_resp);
			vc.lunp = &v_req.an.lun[0];
			vc.target = NULL;
			break;
		default:
			vq_err(vq, "Unknown control request %d", v_req.type);
			continue;
		}

		/*
		 * Validate the size of request and response buffers.
		 * Check for a sane response buffer so we can report
		 * early errors back to the guest.
		 */
		ret = vhost_scsi_chk_size(vq, &vc);
		if (ret)
			goto err;

		/*
		 * Get the rest of the request now that its size is known.
		 */
		vc.req += typ_size;
		vc.req_size -= typ_size;

		ret = vhost_scsi_get_req(vq, &vc, &tpg);
		if (ret)
			goto err;

		if (v_req.type == VIRTIO_SCSI_T_TMF)
			vhost_scsi_handle_tmf(vs, tpg, vq, &v_req.tmf, &vc);
		else
			vhost_scsi_send_an_resp(vs, vq, &vc);
err:
		/*
		 * ENXIO:  No more requests, or read error, wait for next kick
		 * EINVAL: Invalid response buffer, drop the request
		 * EIO:    Respond with bad target
		 * EAGAIN: Pending request
		 */
		if (ret == -ENXIO)
			break;
		else if (ret == -EIO)
			vhost_scsi_send_bad_target(vs, vq, vc.head, vc.out);
	} while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
out:
	mutex_unlock(&vq->mutex);
}
static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);

	pr_debug("%s: The handling func for control queue.\n", __func__);
	vhost_scsi_ctl_handle_vq(vs, vq);
}
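/*
 * Queue a virtio-scsi event and kick the event work item. The LUN
 * addressing built into evt->event.lun[] follows the virtio-scsi
 * single-level format: 0x01 in byte 0, the target in byte 1, and a
 * flat (or 0x40-flagged, for LUNs >= 256) LUN in bytes 2-3.
 */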
static void
vhost_scsi_send_evt(struct vhost_scsi *vs,
		    struct vhost_scsi_tpg *tpg,
		    struct se_lun *lun,
		    u32 event,
		    u32 reason)
{
	struct vhost_scsi_evt *evt;

	evt = vhost_scsi_allocate_evt(vs, event, reason);
	if (!evt)
		return;

	if (tpg && lun) {
		/* TODO: share lun setup code with virtio-scsi.ko */
		/*
		 * Note: evt->event is zeroed when we allocate it and
		 * lun[4-7] need to be zero according to virtio-scsi spec.
		 */
		evt->event.lun[0] = 0x01;
		evt->event.lun[1] = tpg->tport_tpgt;
		if (lun->unpacked_lun >= 256)
			evt->event.lun[2] = lun->unpacked_lun >> 8 | 0x40;
		evt->event.lun[3] = lun->unpacked_lun & 0xFF;
	}

	llist_add(&evt->list, &vs->vs_event_list);
	vhost_work_queue(&vs->dev, &vs->vs_event_work);
}
static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);

	mutex_lock(&vq->mutex);
	if (!vhost_vq_get_backend(vq))
		goto out;

	if (vs->vs_events_missed)
		vhost_scsi_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
out:
	mutex_unlock(&vq->mutex);
}
static void vhost_scsi_handle_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev);

	vhost_scsi_handle_vq(vs, vq);
}
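/*
 * Flushing works in two steps: a fresh vhost_scsi_inflight generation
 * is installed on every virtqueue, then the initial reference on each
 * old generation is dropped. Commands hold a reference on the
 * generation that was current when they were issued, so once every
 * pre-flush command completes the old generation's completion fires
 * and the wait at the end of vhost_scsi_flush() finishes.
 */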
/* Callers must hold dev mutex */
static void vhost_scsi_flush(struct vhost_scsi *vs)
{
	struct vhost_scsi_inflight *old_inflight[VHOST_SCSI_MAX_VQ];
	int i;

	/* Init new inflight and remember the old inflight */
	vhost_scsi_init_inflight(vs, old_inflight);

	/*
	 * The inflight->kref was initialized to 1. We decrement it here to
	 * indicate the start of the flush operation so that it will reach 0
	 * when all the reqs are finished.
	 */
	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
		kref_put(&old_inflight[i]->kref, vhost_scsi_done_inflight);

	/* Flush both the vhost poll and vhost work */
	vhost_work_dev_flush(&vs->dev);

	/* Wait for all reqs issued before the flush to be finished */
	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
		wait_for_completion(&old_inflight[i]->comp);
}
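/*
 * Each I/O virtqueue preallocates an array of vhost_scsi_cmd descriptors
 * together with their data, protection, and page scatter-gather arrays,
 * sized by the virtqueue's ring size at endpoint setup. A per-virtqueue
 * sbitmap hands out tags into this array so the submission path can
 * avoid memory allocation.
 */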
static void vhost_scsi_destroy_vq_cmds(struct vhost_virtqueue *vq)
{
	struct vhost_scsi_virtqueue *svq = container_of(vq,
					struct vhost_scsi_virtqueue, vq);
	struct vhost_scsi_cmd *tv_cmd;
	unsigned int i;

	if (!svq->scsi_cmds)
		return;

	for (i = 0; i < svq->max_cmds; i++) {
		tv_cmd = &svq->scsi_cmds[i];

		kfree(tv_cmd->tvc_sgl);
		kfree(tv_cmd->tvc_prot_sgl);
		kfree(tv_cmd->tvc_upages);
	}

	sbitmap_free(&svq->scsi_tags);
	kfree(svq->scsi_cmds);
	svq->scsi_cmds = NULL;
}
static int vhost_scsi_setup_vq_cmds(struct vhost_virtqueue *vq, int max_cmds)
{
	struct vhost_scsi_virtqueue *svq = container_of(vq,
					struct vhost_scsi_virtqueue, vq);
	struct vhost_scsi_cmd *tv_cmd;
	unsigned int i;

	if (svq->scsi_cmds)
		return 0;

	if (sbitmap_init_node(&svq->scsi_tags, max_cmds, -1, GFP_KERNEL,
			      NUMA_NO_NODE, false, true))
		return -ENOMEM;
	svq->max_cmds = max_cmds;

	svq->scsi_cmds = kcalloc(max_cmds, sizeof(*tv_cmd), GFP_KERNEL);
	if (!svq->scsi_cmds) {
		sbitmap_free(&svq->scsi_tags);
		return -ENOMEM;
	}

	for (i = 0; i < max_cmds; i++) {
		tv_cmd = &svq->scsi_cmds[i];

		tv_cmd->tvc_sgl = kcalloc(VHOST_SCSI_PREALLOC_SGLS,
					  sizeof(struct scatterlist),
					  GFP_KERNEL);
		if (!tv_cmd->tvc_sgl) {
			pr_err("Unable to allocate tv_cmd->tvc_sgl\n");
			goto out;
		}

		tv_cmd->tvc_upages = kcalloc(VHOST_SCSI_PREALLOC_UPAGES,
					     sizeof(struct page *),
					     GFP_KERNEL);
		if (!tv_cmd->tvc_upages) {
			pr_err("Unable to allocate tv_cmd->tvc_upages\n");
			goto out;
		}

		tv_cmd->tvc_prot_sgl = kcalloc(VHOST_SCSI_PREALLOC_PROT_SGLS,
					       sizeof(struct scatterlist),
					       GFP_KERNEL);
		if (!tv_cmd->tvc_prot_sgl) {
			pr_err("Unable to allocate tv_cmd->tvc_prot_sgl\n");
			goto out;
		}
	}
	return 0;
out:
	vhost_scsi_destroy_vq_cmds(vq);
	return -ENOMEM;
}
/*
 * Called from vhost_scsi_ioctl() context to walk the list of available
 * vhost_scsi_tpg with an active struct vhost_scsi_nexus
 *
 *  The lock nesting rule is:
 *    vhost_scsi_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex
 */
static int
vhost_scsi_set_endpoint(struct vhost_scsi *vs,
			struct vhost_scsi_target *t)
{
	struct se_portal_group *se_tpg;
	struct vhost_scsi_tport *tv_tport;
	struct vhost_scsi_tpg *tpg;
	struct vhost_scsi_tpg **vs_tpg;
	struct vhost_virtqueue *vq;
	int index, ret, i, len;
	bool match = false;

	mutex_lock(&vhost_scsi_mutex);
	mutex_lock(&vs->dev.mutex);

	/* Verify that ring has been setup correctly. */
	for (index = 0; index < vs->dev.nvqs; ++index) {
		if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
			ret = -EFAULT;
			goto out;
		}
	}

	len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET;
	vs_tpg = kzalloc(len, GFP_KERNEL);
	if (!vs_tpg) {
		ret = -ENOMEM;
		goto out;
	}
	if (vs->vs_tpg)
		memcpy(vs_tpg, vs->vs_tpg, len);

	list_for_each_entry(tpg, &vhost_scsi_list, tv_tpg_list) {
		mutex_lock(&tpg->tv_tpg_mutex);
		if (!tpg->tpg_nexus) {
			mutex_unlock(&tpg->tv_tpg_mutex);
			continue;
		}
		if (tpg->tv_tpg_vhost_count != 0) {
			mutex_unlock(&tpg->tv_tpg_mutex);
			continue;
		}
		tv_tport = tpg->tport;

		if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
			if (vs->vs_tpg && vs->vs_tpg[tpg->tport_tpgt]) {
				mutex_unlock(&tpg->tv_tpg_mutex);
				ret = -EEXIST;
				goto undepend;
			}
			/*
			 * In order to ensure individual vhost-scsi configfs
			 * groups cannot be removed while in use by vhost ioctl,
			 * go ahead and take an explicit se_tpg->tpg_group.cg_item
			 * dependency now.
			 */
			se_tpg = &tpg->se_tpg;
			ret = target_depend_item(&se_tpg->tpg_group.cg_item);
			if (ret) {
				pr_warn("target_depend_item() failed: %d\n", ret);
				mutex_unlock(&tpg->tv_tpg_mutex);
				goto undepend;
			}
			tpg->tv_tpg_vhost_count++;
			tpg->vhost_scsi = vs;
			vs_tpg[tpg->tport_tpgt] = tpg;
			match = true;
		}
		mutex_unlock(&tpg->tv_tpg_mutex);
	}

	if (match) {
		memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
		       sizeof(vs->vs_vhost_wwpn));

		for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++) {
			vq = &vs->vqs[i].vq;
			if (!vhost_vq_is_setup(vq))
				continue;

			ret = vhost_scsi_setup_vq_cmds(vq, vq->num);
			if (ret)
				goto destroy_vq_cmds;
		}

		for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
			vq = &vs->vqs[i].vq;
			mutex_lock(&vq->mutex);
			vhost_vq_set_backend(vq, vs_tpg);
			vhost_vq_init_access(vq);
			mutex_unlock(&vq->mutex);
		}
		ret = 0;
	} else {
		ret = -EEXIST;
	}

	/*
	 * Act as synchronize_rcu to make sure access to
	 * old vs->vs_tpg is finished.
	 */
	vhost_scsi_flush(vs);
	kfree(vs->vs_tpg);
	vs->vs_tpg = vs_tpg;
	goto out;

destroy_vq_cmds:
	for (i--; i >= VHOST_SCSI_VQ_IO; i--) {
		if (!vhost_vq_get_backend(&vs->vqs[i].vq))
			vhost_scsi_destroy_vq_cmds(&vs->vqs[i].vq);
	}
undepend:
	for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
		tpg = vs_tpg[i];
		if (tpg) {
			tpg->tv_tpg_vhost_count--;
			target_undepend_item(&tpg->se_tpg.tpg_group.cg_item);
		}
	}
	kfree(vs_tpg);
out:
	mutex_unlock(&vs->dev.mutex);
	mutex_unlock(&vhost_scsi_mutex);
	return ret;
}
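/*
 * Inverse of vhost_scsi_set_endpoint(): detaches every tpg matching
 * t->vhost_wwpn, quiesces in-flight commands, tears down the per-vq
 * command arrays, and drops the configfs dependencies taken at attach
 * time.
 */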
static int
vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
			  struct vhost_scsi_target *t)
{
	struct se_portal_group *se_tpg;
	struct vhost_scsi_tport *tv_tport;
	struct vhost_scsi_tpg *tpg;
	struct vhost_virtqueue *vq;
	bool match = false;
	int index, ret, i;
	u8 target;

	mutex_lock(&vhost_scsi_mutex);
	mutex_lock(&vs->dev.mutex);
	/* Verify that ring has been setup correctly. */
	for (index = 0; index < vs->dev.nvqs; ++index) {
		if (!vhost_vq_access_ok(&vs->vqs[index].vq)) {
			ret = -EFAULT;
			goto err_dev;
		}
	}

	if (!vs->vs_tpg) {
		ret = 0;
		goto err_dev;
	}

	for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
		target = i;
		tpg = vs->vs_tpg[target];
		if (!tpg)
			continue;

		mutex_lock(&tpg->tv_tpg_mutex);
		tv_tport = tpg->tport;
		if (!tv_tport) {
			ret = -ENODEV;
			goto err_tpg;
		}

		if (strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
			pr_warn("tv_tport->tport_name: %s, tpg->tport_tpgt: %hu"
				" does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n",
				tv_tport->tport_name, tpg->tport_tpgt,
				t->vhost_wwpn, t->vhost_tpgt);
			ret = -EINVAL;
			goto err_tpg;
		}
		tpg->tv_tpg_vhost_count--;
		tpg->vhost_scsi = NULL;
		vs->vs_tpg[target] = NULL;
		match = true;
		mutex_unlock(&tpg->tv_tpg_mutex);
		/*
		 * Release se_tpg->tpg_group.cg_item configfs dependency now
		 * to allow vhost-scsi WWPN se_tpg->tpg_group shutdown to occur.
		 */
		se_tpg = &tpg->se_tpg;
		target_undepend_item(&se_tpg->tpg_group.cg_item);
	}
	if (match) {
		for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
			vq = &vs->vqs[i].vq;
			mutex_lock(&vq->mutex);
			vhost_vq_set_backend(vq, NULL);
			mutex_unlock(&vq->mutex);
		}
		/* Make sure cmds are not running before tearing them down. */
		vhost_scsi_flush(vs);

		for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
			vq = &vs->vqs[i].vq;
			vhost_scsi_destroy_vq_cmds(vq);
		}
	}
	/*
	 * Act as synchronize_rcu to make sure access to
	 * old vs->vs_tpg is finished.
	 */
	vhost_scsi_flush(vs);
	kfree(vs->vs_tpg);
	vs->vs_tpg = NULL;
	WARN_ON(vs->vs_events_nr);
	mutex_unlock(&vs->dev.mutex);
	mutex_unlock(&vhost_scsi_mutex);
	return 0;

err_tpg:
	mutex_unlock(&tpg->tv_tpg_mutex);
err_dev:
	mutex_unlock(&vs->dev.mutex);
	mutex_unlock(&vhost_scsi_mutex);
	return ret;
}
static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
{
	struct vhost_virtqueue *vq;
	int i;

	if (features & ~VHOST_SCSI_FEATURES)
		return -EOPNOTSUPP;

	mutex_lock(&vs->dev.mutex);
	if ((features & (1 << VHOST_F_LOG_ALL)) &&
	    !vhost_log_access_ok(&vs->dev)) {
		mutex_unlock(&vs->dev.mutex);
		return -EFAULT;
	}

	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
		vq = &vs->vqs[i].vq;
		mutex_lock(&vq->mutex);
		vq->acked_features = features;
		mutex_unlock(&vq->mutex);
	}
	mutex_unlock(&vs->dev.mutex);
	return 0;
}
static int vhost_scsi_open(struct inode *inode, struct file *f)
{
	struct vhost_scsi *vs;
	struct vhost_virtqueue **vqs;
	int r = -ENOMEM, i;

	vs = kvzalloc(sizeof(*vs), GFP_KERNEL);
	if (!vs)
		goto err_vs;

	vqs = kmalloc_array(VHOST_SCSI_MAX_VQ, sizeof(*vqs), GFP_KERNEL);
	if (!vqs)
		goto err_vqs;

	vhost_work_init(&vs->vs_completion_work, vhost_scsi_complete_cmd_work);
	vhost_work_init(&vs->vs_event_work, vhost_scsi_evt_work);

	vs->vs_events_nr = 0;
	vs->vs_events_missed = false;

	vqs[VHOST_SCSI_VQ_CTL] = &vs->vqs[VHOST_SCSI_VQ_CTL].vq;
	vqs[VHOST_SCSI_VQ_EVT] = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
	vs->vqs[VHOST_SCSI_VQ_CTL].vq.handle_kick = vhost_scsi_ctl_handle_kick;
	vs->vqs[VHOST_SCSI_VQ_EVT].vq.handle_kick = vhost_scsi_evt_handle_kick;
	for (i = VHOST_SCSI_VQ_IO; i < VHOST_SCSI_MAX_VQ; i++) {
		vqs[i] = &vs->vqs[i].vq;
		vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
	}
	vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ, UIO_MAXIOV,
		       VHOST_SCSI_WEIGHT, 0, true, NULL);

	vhost_scsi_init_inflight(vs, NULL);

	f->private_data = vs;
	return 0;

err_vqs:
	kvfree(vs);
err_vs:
	return r;
}
static int vhost_scsi_release(struct inode *inode, struct file *f)
{
	struct vhost_scsi *vs = f->private_data;
	struct vhost_scsi_target t;

	mutex_lock(&vs->dev.mutex);
	memcpy(t.vhost_wwpn, vs->vs_vhost_wwpn, sizeof(t.vhost_wwpn));
	mutex_unlock(&vs->dev.mutex);
	vhost_scsi_clear_endpoint(vs, &t);
	vhost_dev_stop(&vs->dev);
	vhost_dev_cleanup(&vs->dev);
	/* Jobs can re-queue themselves in evt kick handler. Do extra flush. */
	vhost_scsi_flush(vs);
	kfree(vs->dev.vqs);
	kvfree(vs);
	return 0;
}
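/*
 * ioctl entry point for /dev/vhost-scsi. The vhost-scsi specific ioctls
 * (SET/CLEAR_ENDPOINT, GET_ABI_VERSION, SET/GET_EVENTS_MISSED and the
 * feature negotiation pair) are handled here; everything else falls
 * through to the generic vhost device and vring ioctls.
 */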
static long
vhost_scsi_ioctl(struct file *f,
		 unsigned int ioctl,
		 unsigned long arg)
{
	struct vhost_scsi *vs = f->private_data;
	struct vhost_scsi_target backend;
	void __user *argp = (void __user *)arg;
	u64 __user *featurep = argp;
	u32 __user *eventsp = argp;
	u32 events_missed;
	u64 features;
	int r, abi_version = VHOST_SCSI_ABI_VERSION;
	struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;

	switch (ioctl) {
	case VHOST_SCSI_SET_ENDPOINT:
		if (copy_from_user(&backend, argp, sizeof backend))
			return -EFAULT;
		if (backend.reserved != 0)
			return -EOPNOTSUPP;

		return vhost_scsi_set_endpoint(vs, &backend);
	case VHOST_SCSI_CLEAR_ENDPOINT:
		if (copy_from_user(&backend, argp, sizeof backend))
			return -EFAULT;
		if (backend.reserved != 0)
			return -EOPNOTSUPP;

		return vhost_scsi_clear_endpoint(vs, &backend);
	case VHOST_SCSI_GET_ABI_VERSION:
		if (copy_to_user(argp, &abi_version, sizeof abi_version))
			return -EFAULT;
		return 0;
	case VHOST_SCSI_SET_EVENTS_MISSED:
		if (get_user(events_missed, eventsp))
			return -EFAULT;
		mutex_lock(&vq->mutex);
		vs->vs_events_missed = events_missed;
		mutex_unlock(&vq->mutex);
		return 0;
	case VHOST_SCSI_GET_EVENTS_MISSED:
		mutex_lock(&vq->mutex);
		events_missed = vs->vs_events_missed;
		mutex_unlock(&vq->mutex);
		if (put_user(events_missed, eventsp))
			return -EFAULT;
		return 0;
	case VHOST_GET_FEATURES:
		features = VHOST_SCSI_FEATURES;
		if (copy_to_user(featurep, &features, sizeof features))
			return -EFAULT;
		return 0;
	case VHOST_SET_FEATURES:
		if (copy_from_user(&features, featurep, sizeof features))
			return -EFAULT;
		return vhost_scsi_set_features(vs, features);
	default:
		mutex_lock(&vs->dev.mutex);
		r = vhost_dev_ioctl(&vs->dev, ioctl, argp);
		/* TODO: flush backend after dev ioctl. */
		if (r == -ENOIOCTLCMD)
			r = vhost_vring_ioctl(&vs->dev, ioctl, argp);
		mutex_unlock(&vs->dev.mutex);
		return r;
	}
}
static const struct file_operations vhost_scsi_fops = {
	.owner          = THIS_MODULE,
	.release        = vhost_scsi_release,
	.unlocked_ioctl = vhost_scsi_ioctl,
	.compat_ioctl   = compat_ptr_ioctl,
	.open           = vhost_scsi_open,
	.llseek		= noop_llseek,
};

static struct miscdevice vhost_scsi_misc = {
	MISC_DYNAMIC_MINOR,
	"vhost-scsi",
	&vhost_scsi_fops,
};
static int __init vhost_scsi_register(void)
{
	return misc_register(&vhost_scsi_misc);
}

static void vhost_scsi_deregister(void)
{
	misc_deregister(&vhost_scsi_misc);
}
static char *vhost_scsi_dump_proto_id(struct vhost_scsi_tport *tport)
{
	switch (tport->tport_proto_id) {
	case SCSI_PROTOCOL_SAS:
		return "SAS";
	case SCSI_PROTOCOL_FCP:
		return "FCP";
	case SCSI_PROTOCOL_ISCSI:
		return "iSCSI";
	default:
		break;
	}

	return "Unknown";
}
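/*
 * LUN hotplug/hotunplug from configfs. If the guest negotiated
 * VIRTIO_SCSI_F_HOTPLUG, a TRANSPORT_RESET event with a RESCAN or
 * REMOVED reason is queued on the event virtqueue so the guest rescans
 * the bus; otherwise the change is silent.
 */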
static void
vhost_scsi_do_plug(struct vhost_scsi_tpg *tpg,
		   struct se_lun *lun, bool plug)
{
	struct vhost_scsi *vs = tpg->vhost_scsi;
	struct vhost_virtqueue *vq;
	u32 reason;

	if (!vs)
		return;

	mutex_lock(&vs->dev.mutex);

	if (plug)
		reason = VIRTIO_SCSI_EVT_RESET_RESCAN;
	else
		reason = VIRTIO_SCSI_EVT_RESET_REMOVED;

	vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
	mutex_lock(&vq->mutex);
	if (vhost_has_feature(vq, VIRTIO_SCSI_F_HOTPLUG))
		vhost_scsi_send_evt(vs, tpg, lun,
				    VIRTIO_SCSI_T_TRANSPORT_RESET, reason);
	mutex_unlock(&vq->mutex);
	mutex_unlock(&vs->dev.mutex);
}
static void vhost_scsi_hotplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
{
	vhost_scsi_do_plug(tpg, lun, true);
}

static void vhost_scsi_hotunplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
{
	vhost_scsi_do_plug(tpg, lun, false);
}
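/*
 * Port link/unlink also manage the per-tpg reserve of TMF tracking
 * structures: one vhost_scsi_tmf is preallocated per linked LUN so that
 * a LUN RESET can be handled without allocating in the I/O path.
 */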
static int vhost_scsi_port_link(struct se_portal_group *se_tpg,
				struct se_lun *lun)
{
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);
	struct vhost_scsi_tmf *tmf;

	tmf = kzalloc(sizeof(*tmf), GFP_KERNEL);
	if (!tmf)
		return -ENOMEM;
	INIT_LIST_HEAD(&tmf->queue_entry);
	vhost_work_init(&tmf->vwork, vhost_scsi_tmf_resp_work);

	mutex_lock(&vhost_scsi_mutex);

	mutex_lock(&tpg->tv_tpg_mutex);
	tpg->tv_tpg_port_count++;
	list_add_tail(&tmf->queue_entry, &tpg->tmf_queue);
	mutex_unlock(&tpg->tv_tpg_mutex);

	vhost_scsi_hotplug(tpg, lun);

	mutex_unlock(&vhost_scsi_mutex);

	return 0;
}
static void vhost_scsi_port_unlink(struct se_portal_group *se_tpg,
				   struct se_lun *lun)
{
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);
	struct vhost_scsi_tmf *tmf;

	mutex_lock(&vhost_scsi_mutex);

	mutex_lock(&tpg->tv_tpg_mutex);
	tpg->tv_tpg_port_count--;
	tmf = list_first_entry(&tpg->tmf_queue, struct vhost_scsi_tmf,
			       queue_entry);
	list_del(&tmf->queue_entry);
	kfree(tmf);
	mutex_unlock(&tpg->tv_tpg_mutex);

	vhost_scsi_hotunplug(tpg, lun);

	mutex_unlock(&vhost_scsi_mutex);
}
static ssize_t vhost_scsi_tpg_attrib_fabric_prot_type_store(
		struct config_item *item, const char *page, size_t count)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);
	unsigned long val;
	int ret = kstrtoul(page, 0, &val);

	if (ret) {
		pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret);
		return ret;
	}
	if (val != 0 && val != 1 && val != 3) {
		pr_err("Invalid vhost_scsi fabric_prot_type: %lu\n", val);
		return -EINVAL;
	}
	tpg->tv_fabric_prot_type = val;

	return count;
}

static ssize_t vhost_scsi_tpg_attrib_fabric_prot_type_show(
		struct config_item *item, char *page)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);

	return sprintf(page, "%d\n", tpg->tv_fabric_prot_type);
}

CONFIGFS_ATTR(vhost_scsi_tpg_attrib_, fabric_prot_type);

static struct configfs_attribute *vhost_scsi_tpg_attrib_attrs[] = {
	&vhost_scsi_tpg_attrib_attr_fabric_prot_type,
	NULL,
};
static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
				 const char *name)
{
	struct vhost_scsi_nexus *tv_nexus;

	mutex_lock(&tpg->tv_tpg_mutex);
	if (tpg->tpg_nexus) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		pr_debug("tpg->tpg_nexus already exists\n");
		return -EEXIST;
	}

	tv_nexus = kzalloc(sizeof(*tv_nexus), GFP_KERNEL);
	if (!tv_nexus) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		pr_err("Unable to allocate struct vhost_scsi_nexus\n");
		return -ENOMEM;
	}
	/*
	 * Since we are running in 'demo mode' this call will generate a
	 * struct se_node_acl for the vhost_scsi struct se_portal_group with
	 * the SCSI Initiator port name of the passed configfs group 'name'.
	 */
	tv_nexus->tvn_se_sess = target_setup_session(&tpg->se_tpg, 0, 0,
					TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS,
					(unsigned char *)name, tv_nexus, NULL);
	if (IS_ERR(tv_nexus->tvn_se_sess)) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		kfree(tv_nexus);
		return -ENOMEM;
	}
	tpg->tpg_nexus = tv_nexus;

	mutex_unlock(&tpg->tv_tpg_mutex);
	return 0;
}
static int vhost_scsi_drop_nexus(struct vhost_scsi_tpg *tpg)
{
	struct se_session *se_sess;
	struct vhost_scsi_nexus *tv_nexus;

	mutex_lock(&tpg->tv_tpg_mutex);
	tv_nexus = tpg->tpg_nexus;
	if (!tv_nexus) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		return -ENODEV;
	}

	se_sess = tv_nexus->tvn_se_sess;
	if (!se_sess) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		return -ENODEV;
	}

	if (tpg->tv_tpg_port_count != 0) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		pr_err("Unable to remove TCM_vhost I_T Nexus with"
			" active TPG port count: %d\n",
			tpg->tv_tpg_port_count);
		return -EBUSY;
	}

	if (tpg->tv_tpg_vhost_count != 0) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		pr_err("Unable to remove TCM_vhost I_T Nexus with"
			" active TPG vhost count: %d\n",
			tpg->tv_tpg_vhost_count);
		return -EBUSY;
	}

	pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated"
		" %s Initiator Port: %s\n", vhost_scsi_dump_proto_id(tpg->tport),
		tv_nexus->tvn_se_sess->se_node_acl->initiatorname);

	/*
	 * Release the SCSI I_T Nexus to the emulated vhost Target Port
	 */
	target_remove_session(se_sess);
	tpg->tpg_nexus = NULL;
	mutex_unlock(&tpg->tv_tpg_mutex);

	kfree(tv_nexus);
	return 0;
}
static ssize_t vhost_scsi_tpg_nexus_show(struct config_item *item, char *page)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);
	struct vhost_scsi_nexus *tv_nexus;
	ssize_t ret;

	mutex_lock(&tpg->tv_tpg_mutex);
	tv_nexus = tpg->tpg_nexus;
	if (!tv_nexus) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		return -ENODEV;
	}
	ret = snprintf(page, PAGE_SIZE, "%s\n",
			tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
	mutex_unlock(&tpg->tv_tpg_mutex);

	return ret;
}
static ssize_t vhost_scsi_tpg_nexus_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);
	struct vhost_scsi_tport *tport_wwn = tpg->tport;
	unsigned char i_port[VHOST_SCSI_NAMELEN], *ptr, *port_ptr;
	int ret;
	/*
	 * Shutdown the active I_T nexus if 'NULL' is passed..
	 */
	if (!strncmp(page, "NULL", 4)) {
		ret = vhost_scsi_drop_nexus(tpg);
		return (!ret) ? count : ret;
	}
	/*
	 * Otherwise make sure the passed virtual Initiator port WWN matches
	 * the fabric protocol_id set in vhost_scsi_make_tport(), and call
	 * vhost_scsi_make_nexus().
	 */
	if (strlen(page) >= VHOST_SCSI_NAMELEN) {
		pr_err("Emulated NAA Sas Address: %s, exceeds"
			" max: %d\n", page, VHOST_SCSI_NAMELEN);
		return -EINVAL;
	}
	snprintf(&i_port[0], VHOST_SCSI_NAMELEN, "%s", page);

	ptr = strstr(i_port, "naa.");
	if (ptr) {
		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) {
			pr_err("Passed SAS Initiator Port %s does not"
				" match target port protoid: %s\n", i_port,
				vhost_scsi_dump_proto_id(tport_wwn));
			return -EINVAL;
		}
		port_ptr = &i_port[0];
		goto check_newline;
	}
	ptr = strstr(i_port, "fc.");
	if (ptr) {
		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) {
			pr_err("Passed FCP Initiator Port %s does not"
				" match target port protoid: %s\n", i_port,
				vhost_scsi_dump_proto_id(tport_wwn));
			return -EINVAL;
		}
		port_ptr = &i_port[3]; /* Skip over "fc." */
		goto check_newline;
	}
	ptr = strstr(i_port, "iqn.");
	if (ptr) {
		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) {
			pr_err("Passed iSCSI Initiator Port %s does not"
				" match target port protoid: %s\n", i_port,
				vhost_scsi_dump_proto_id(tport_wwn));
			return -EINVAL;
		}
		port_ptr = &i_port[0];
		goto check_newline;
	}
	pr_err("Unable to locate prefix for emulated Initiator Port:"
			" %s\n", i_port);
	return -EINVAL;
	/*
	 * Clear any trailing newline for the NAA WWN
	 */
check_newline:
	if (i_port[strlen(i_port) - 1] == '\n')
		i_port[strlen(i_port) - 1] = '\0';

	ret = vhost_scsi_make_nexus(tpg, port_ptr);
	if (ret < 0)
		return ret;

	return count;
}

CONFIGFS_ATTR(vhost_scsi_tpg_, nexus);

static struct configfs_attribute *vhost_scsi_tpg_attrs[] = {
	&vhost_scsi_tpg_attr_nexus,
	NULL,
};
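/*
 * Illustrative configfs usage (the paths follow the conventional target
 * configfs layout and the WWNs below are only example values):
 *
 *   mkdir -p /sys/kernel/config/target/vhost/naa.600140554cf3a18e/tpgt_1
 *   echo -n naa.60014053226f0388 > \
 *       /sys/kernel/config/target/vhost/naa.600140554cf3a18e/tpgt_1/nexus
 *
 * Writing an initiator WWN creates the I_T nexus via
 * vhost_scsi_make_nexus(); writing "NULL" drops it again.
 */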
static struct se_portal_group *
vhost_scsi_make_tpg(struct se_wwn *wwn, const char *name)
{
	struct vhost_scsi_tport *tport = container_of(wwn,
			struct vhost_scsi_tport, tport_wwn);

	struct vhost_scsi_tpg *tpg;
	u16 tpgt;
	int ret;

	if (strstr(name, "tpgt_") != name)
		return ERR_PTR(-EINVAL);
	if (kstrtou16(name + 5, 10, &tpgt) || tpgt >= VHOST_SCSI_MAX_TARGET)
		return ERR_PTR(-EINVAL);

	tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
	if (!tpg) {
		pr_err("Unable to allocate struct vhost_scsi_tpg");
		return ERR_PTR(-ENOMEM);
	}
	mutex_init(&tpg->tv_tpg_mutex);
	INIT_LIST_HEAD(&tpg->tv_tpg_list);
	INIT_LIST_HEAD(&tpg->tmf_queue);
	tpg->tport = tport;
	tpg->tport_tpgt = tpgt;

	ret = core_tpg_register(wwn, &tpg->se_tpg, tport->tport_proto_id);
	if (ret < 0) {
		kfree(tpg);
		return NULL;
	}
	mutex_lock(&vhost_scsi_mutex);
	list_add_tail(&tpg->tv_tpg_list, &vhost_scsi_list);
	mutex_unlock(&vhost_scsi_mutex);

	return &tpg->se_tpg;
}
static void vhost_scsi_drop_tpg(struct se_portal_group *se_tpg)
{
	struct vhost_scsi_tpg *tpg = container_of(se_tpg,
				struct vhost_scsi_tpg, se_tpg);

	mutex_lock(&vhost_scsi_mutex);
	list_del(&tpg->tv_tpg_list);
	mutex_unlock(&vhost_scsi_mutex);
	/*
	 * Release the virtual I_T Nexus for this vhost TPG
	 */
	vhost_scsi_drop_nexus(tpg);
	/*
	 * Deregister the se_tpg from TCM..
	 */
	core_tpg_deregister(se_tpg);
	kfree(tpg);
}
static struct se_wwn *
vhost_scsi_make_tport(struct target_fabric_configfs *tf,
		      struct config_group *group,
		      const char *name)
{
	struct vhost_scsi_tport *tport;
	char *ptr;
	u64 wwpn = 0;
	int off = 0;

	/* if (vhost_scsi_parse_wwn(name, &wwpn, 1) < 0)
		return ERR_PTR(-EINVAL); */

	tport = kzalloc(sizeof(*tport), GFP_KERNEL);
	if (!tport) {
		pr_err("Unable to allocate struct vhost_scsi_tport");
		return ERR_PTR(-ENOMEM);
	}
	tport->tport_wwpn = wwpn;
	/*
	 * Determine the emulated Protocol Identifier and Target Port Name
	 * based on the incoming configfs directory name.
	 */
	ptr = strstr(name, "naa.");
	if (ptr) {
		tport->tport_proto_id = SCSI_PROTOCOL_SAS;
		goto check_len;
	}
	ptr = strstr(name, "fc.");
	if (ptr) {
		tport->tport_proto_id = SCSI_PROTOCOL_FCP;
		off = 3; /* Skip over "fc." */
		goto check_len;
	}
	ptr = strstr(name, "iqn.");
	if (ptr) {
		tport->tport_proto_id = SCSI_PROTOCOL_ISCSI;
		goto check_len;
	}

	pr_err("Unable to locate prefix for emulated Target Port:"
			" %s\n", name);
	kfree(tport);
	return ERR_PTR(-EINVAL);

check_len:
	if (strlen(name) >= VHOST_SCSI_NAMELEN) {
		pr_err("Emulated %s Address: %s, exceeds"
			" max: %d\n", name, vhost_scsi_dump_proto_id(tport),
			VHOST_SCSI_NAMELEN);
		kfree(tport);
		return ERR_PTR(-EINVAL);
	}
	snprintf(&tport->tport_name[0], VHOST_SCSI_NAMELEN, "%s", &name[off]);

	pr_debug("TCM_VHost_ConfigFS: Allocated emulated Target"
		" %s Address: %s\n", vhost_scsi_dump_proto_id(tport), name);

	return &tport->tport_wwn;
}
static void vhost_scsi_drop_tport(struct se_wwn *wwn)
{
	struct vhost_scsi_tport *tport = container_of(wwn,
				struct vhost_scsi_tport, tport_wwn);

	pr_debug("TCM_VHost_ConfigFS: Deallocating emulated Target"
		" %s Address: %s\n", vhost_scsi_dump_proto_id(tport),
		tport->tport_name);

	kfree(tport);
}
static ssize_t
vhost_scsi_wwn_version_show(struct config_item *item, char *page)
{
	return sprintf(page, "TCM_VHOST fabric module %s on %s/%s"
		" on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
		utsname()->machine);
}

CONFIGFS_ATTR_RO(vhost_scsi_wwn_, version);

static struct configfs_attribute *vhost_scsi_wwn_attrs[] = {
	&vhost_scsi_wwn_attr_version,
	NULL,
};
static const struct target_core_fabric_ops vhost_scsi_ops = {
	.module				= THIS_MODULE,
	.fabric_name			= "vhost",
	.max_data_sg_nents		= VHOST_SCSI_PREALLOC_SGLS,
	.tpg_get_wwn			= vhost_scsi_get_fabric_wwn,
	.tpg_get_tag			= vhost_scsi_get_tpgt,
	.tpg_check_demo_mode		= vhost_scsi_check_true,
	.tpg_check_demo_mode_cache	= vhost_scsi_check_true,
	.tpg_check_demo_mode_write_protect = vhost_scsi_check_false,
	.tpg_check_prod_mode_write_protect = vhost_scsi_check_false,
	.tpg_check_prot_fabric_only	= vhost_scsi_check_prot_fabric_only,
	.tpg_get_inst_index		= vhost_scsi_tpg_get_inst_index,
	.release_cmd			= vhost_scsi_release_cmd,
	.check_stop_free		= vhost_scsi_check_stop_free,
	.sess_get_index			= vhost_scsi_sess_get_index,
	.sess_get_initiator_sid		= NULL,
	.write_pending			= vhost_scsi_write_pending,
	.set_default_node_attributes	= vhost_scsi_set_default_node_attrs,
	.get_cmd_state			= vhost_scsi_get_cmd_state,
	.queue_data_in			= vhost_scsi_queue_data_in,
	.queue_status			= vhost_scsi_queue_status,
	.queue_tm_rsp			= vhost_scsi_queue_tm_rsp,
	.aborted_task			= vhost_scsi_aborted_task,
	/*
	 * Setup callers for generic logic in target_core_fabric_configfs.c
	 */
	.fabric_make_wwn		= vhost_scsi_make_tport,
	.fabric_drop_wwn		= vhost_scsi_drop_tport,
	.fabric_make_tpg		= vhost_scsi_make_tpg,
	.fabric_drop_tpg		= vhost_scsi_drop_tpg,
	.fabric_post_link		= vhost_scsi_port_link,
	.fabric_pre_unlink		= vhost_scsi_port_unlink,

	.tfc_wwn_attrs			= vhost_scsi_wwn_attrs,
	.tfc_tpg_base_attrs		= vhost_scsi_tpg_attrs,
	.tfc_tpg_attrib_attrs		= vhost_scsi_tpg_attrib_attrs,
};
static int __init vhost_scsi_init(void)
{
	int ret = -ENOMEM;

	pr_debug("TCM_VHOST fabric module %s on %s/%s"
		" on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
		utsname()->machine);

	ret = vhost_scsi_register();
	if (ret < 0)
		goto out;

	ret = target_register_template(&vhost_scsi_ops);
	if (ret < 0)
		goto out_vhost_scsi_deregister;

	return 0;

out_vhost_scsi_deregister:
	vhost_scsi_deregister();
out:
	return ret;
};

static void vhost_scsi_exit(void)
{
	target_unregister_template(&vhost_scsi_ops);
	vhost_scsi_deregister();
};

MODULE_DESCRIPTION("VHOST_SCSI series fabric driver");
MODULE_ALIAS("tcm_vhost");
MODULE_LICENSE("GPL");
module_init(vhost_scsi_init);
module_exit(vhost_scsi_exit);