// SPDX-License-Identifier: GPL-2.0
/*
 * ccw based virtio transport
 *
 * Copyright IBM Corp. 2012, 2014
 *
 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
 */

#include <linux/kernel_stat.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/virtio_ring.h>
#include <linux/pfn.h>
#include <linux/async.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/bitops.h>
#include <linux/moduleparam.h>
#include <linux/io.h>
#include <linux/kvm_para.h>
#include <linux/notifier.h>
#include <asm/diag.h>
#include <asm/setup.h>
#include <asm/irq.h>
#include <asm/cio.h>
#include <asm/ccwdev.h>
#include <asm/virtio-ccw.h>
#include <asm/isc.h>
#include <asm/airq.h>
/*
 * virtio related functions
 */

struct vq_config_block {
	__u16 index;
	__u16 num;
} __packed;

#define VIRTIO_CCW_CONFIG_SIZE 0x100
/* same as PCI config space size, should be enough for all drivers */

struct vcdev_dma_area {
	unsigned long indicators;
	unsigned long indicators2;
	struct vq_config_block config_block;
	__u8 status;
};

struct virtio_ccw_device {
	struct virtio_device vdev;
	__u8 config[VIRTIO_CCW_CONFIG_SIZE];
	struct ccw_device *cdev;
	__u32 curr_io;
	int err;
	unsigned int revision; /* Transport revision */
	wait_queue_head_t wait_q;
	spinlock_t lock;
	struct mutex io_lock; /* Serializes I/O requests */
	struct list_head virtqueues;
	bool is_thinint;
	bool going_away;
	bool device_lost;
	unsigned int config_ready;
	void *airq_info;
	struct vcdev_dma_area *dma_area;
};
static inline unsigned long *indicators(struct virtio_ccw_device *vcdev)
{
	return &vcdev->dma_area->indicators;
}

static inline unsigned long *indicators2(struct virtio_ccw_device *vcdev)
{
	return &vcdev->dma_area->indicators2;
}
struct vq_info_block_legacy {
	__u64 queue;
	__u32 align;
	__u16 index;
	__u16 num;
} __packed;

struct vq_info_block {
	__u64 desc;
	__u32 res0;
	__u16 index;
	__u16 num;
	__u64 avail;
	__u64 used;
} __packed;

struct virtio_feature_desc {
	__le32 features;
	__u8 index;
} __packed;

struct virtio_thinint_area {
	unsigned long summary_indicator;
	unsigned long indicator;
	u64 bit_nr;
	u8 isc;
} __packed;

struct virtio_rev_info {
	__u16 revision;
	__u16 length;
	__u8 data[];
};

/* the highest virtio-ccw revision we support */
#define VIRTIO_CCW_REV_MAX 1

struct virtio_ccw_vq_info {
	struct virtqueue *vq;
	int num;
	union {
		struct vq_info_block s;
		struct vq_info_block_legacy l;
	} *info_block;
	int bit_nr;
	struct list_head node;
	long cookie;
};
#define VIRTIO_AIRQ_ISC IO_SCH_ISC /* inherit from subchannel */

#define VIRTIO_IV_BITS (L1_CACHE_BYTES * 8)
#define MAX_AIRQ_AREAS 20

static int virtio_ccw_use_airq = 1;

struct airq_info {
	rwlock_t lock;
	u8 summary_indicator_idx;
	struct airq_struct airq;
	struct airq_iv *aiv;
};
static struct airq_info *airq_areas[MAX_AIRQ_AREAS];
static DEFINE_MUTEX(airq_areas_lock);

static u8 *summary_indicators;

static inline u8 *get_summary_indicator(struct airq_info *info)
{
	return summary_indicators + info->summary_indicator_idx;
}
#define CCW_CMD_SET_VQ 0x13
#define CCW_CMD_VDEV_RESET 0x33
#define CCW_CMD_SET_IND 0x43
#define CCW_CMD_SET_CONF_IND 0x53
#define CCW_CMD_READ_FEAT 0x12
#define CCW_CMD_WRITE_FEAT 0x11
#define CCW_CMD_READ_CONF 0x22
#define CCW_CMD_WRITE_CONF 0x21
#define CCW_CMD_WRITE_STATUS 0x31
#define CCW_CMD_READ_VQ_CONF 0x32
#define CCW_CMD_READ_STATUS 0x72
#define CCW_CMD_SET_IND_ADAPTER 0x73
#define CCW_CMD_SET_VIRTIO_REV 0x83

#define VIRTIO_CCW_DOING_SET_VQ 0x00010000
#define VIRTIO_CCW_DOING_RESET 0x00040000
#define VIRTIO_CCW_DOING_READ_FEAT 0x00080000
#define VIRTIO_CCW_DOING_WRITE_FEAT 0x00100000
#define VIRTIO_CCW_DOING_READ_CONFIG 0x00200000
#define VIRTIO_CCW_DOING_WRITE_CONFIG 0x00400000
#define VIRTIO_CCW_DOING_WRITE_STATUS 0x00800000
#define VIRTIO_CCW_DOING_SET_IND 0x01000000
#define VIRTIO_CCW_DOING_READ_VQ_CONF 0x02000000
#define VIRTIO_CCW_DOING_SET_CONF_IND 0x04000000
#define VIRTIO_CCW_DOING_SET_IND_ADAPTER 0x08000000
#define VIRTIO_CCW_DOING_SET_VIRTIO_REV 0x10000000
#define VIRTIO_CCW_DOING_READ_STATUS 0x20000000
#define VIRTIO_CCW_INTPARM_MASK 0xffff0000

static struct virtio_ccw_device *to_vc_device(struct virtio_device *vdev)
{
	return container_of(vdev, struct virtio_ccw_device, vdev);
}
static void drop_airq_indicator(struct virtqueue *vq, struct airq_info *info)
{
	unsigned long i, flags;

	write_lock_irqsave(&info->lock, flags);
	for (i = 0; i < airq_iv_end(info->aiv); i++) {
		if (vq == (void *)airq_iv_get_ptr(info->aiv, i)) {
			airq_iv_free_bit(info->aiv, i);
			airq_iv_set_ptr(info->aiv, i, 0);
			break;
		}
	}
	write_unlock_irqrestore(&info->lock, flags);
}
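
/*
 * Adapter interrupt handler: scan the indicator bit vector twice, once
 * before and once after clearing the summary indicator, so that a
 * notification raised in between is not lost, and forward every set bit
 * to vring_interrupt().
 */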
static void virtio_airq_handler(struct airq_struct *airq, bool floating)
{
	struct airq_info *info = container_of(airq, struct airq_info, airq);
	unsigned long ai;

	inc_irq_stat(IRQIO_VAI);
	read_lock(&info->lock);
	/* Walk through indicators field, summary indicator active. */
	for (ai = 0;;) {
		ai = airq_iv_scan(info->aiv, ai, airq_iv_end(info->aiv));
		if (ai == -1UL)
			break;
		vring_interrupt(0, (void *)airq_iv_get_ptr(info->aiv, ai));
	}
	*(get_summary_indicator(info)) = 0;
	smp_wmb();
	/* Walk through indicators field, summary indicator not active. */
	for (ai = 0;;) {
		ai = airq_iv_scan(info->aiv, ai, airq_iv_end(info->aiv));
		if (ai == -1UL)
			break;
		vring_interrupt(0, (void *)airq_iv_get_ptr(info->aiv, ai));
	}
	read_unlock(&info->lock);
}
static struct airq_info *new_airq_info(int index)
{
	struct airq_info *info;
	int rc;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return NULL;
	rwlock_init(&info->lock);
	info->aiv = airq_iv_create(VIRTIO_IV_BITS, AIRQ_IV_ALLOC | AIRQ_IV_PTR
				   | AIRQ_IV_CACHELINE);
	if (!info->aiv) {
		kfree(info);
		return NULL;
	}
	info->airq.handler = virtio_airq_handler;
	info->summary_indicator_idx = index;
	info->airq.lsi_ptr = get_summary_indicator(info);
	info->airq.lsi_mask = 0xff;
	info->airq.isc = VIRTIO_AIRQ_ISC;
	rc = register_adapter_interrupt(&info->airq);
	if (rc) {
		airq_iv_release(info->aiv);
		kfree(info);
		return NULL;
	}
	return info;
}
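
/*
 * Allocate nvqs contiguous bits in one of the shared adapter interrupt
 * areas (creating a new area on demand) and associate each bit with its
 * virtqueue. Returns the guest address of the indicator area, or 0 on
 * failure.
 */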
static unsigned long get_airq_indicator(struct virtqueue *vqs[], int nvqs,
					u64 *first, void **airq_info)
{
	int i, j;
	struct airq_info *info;
	unsigned long indicator_addr = 0;
	unsigned long bit, flags;

	for (i = 0; i < MAX_AIRQ_AREAS && !indicator_addr; i++) {
		mutex_lock(&airq_areas_lock);
		if (!airq_areas[i])
			airq_areas[i] = new_airq_info(i);
		info = airq_areas[i];
		mutex_unlock(&airq_areas_lock);
		if (!info)
			return 0;
		write_lock_irqsave(&info->lock, flags);
		bit = airq_iv_alloc(info->aiv, nvqs);
		if (bit == -1UL) {
			/* Not enough vacancies. */
			write_unlock_irqrestore(&info->lock, flags);
			continue;
		}
		*first = bit;
		*airq_info = info;
		indicator_addr = (unsigned long)info->aiv->vector;
		for (j = 0; j < nvqs; j++) {
			airq_iv_set_ptr(info->aiv, bit + j,
					(unsigned long)vqs[j]);
		}
		write_unlock_irqrestore(&info->lock, flags);
	}
	return indicator_addr;
}

static void virtio_ccw_drop_indicators(struct virtio_ccw_device *vcdev)
{
	struct virtio_ccw_vq_info *info;

	if (!vcdev->airq_info)
		return;
	list_for_each_entry(info, &vcdev->virtqueues, node)
		drop_airq_indicator(info->vq, vcdev->airq_info);
}
static int doing_io(struct virtio_ccw_device *vcdev, __u32 flag)
{
	unsigned long flags;
	__u32 ret;

	spin_lock_irqsave(get_ccwdev_lock(vcdev->cdev), flags);
	if (vcdev->err)
		ret = 0;
	else
		ret = vcdev->curr_io & flag;
	spin_unlock_irqrestore(get_ccwdev_lock(vcdev->cdev), flags);
	return ret;
}
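
/*
 * Issue a channel program on the device and wait for its completion.
 * Requests are serialized via io_lock; -EBUSY from ccw_device_start()
 * is retried until the subchannel accepts the request.
 */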
static int ccw_io_helper(struct virtio_ccw_device *vcdev,
			 struct ccw1 *ccw, __u32 intparm)
{
	int ret;
	unsigned long flags;
	int flag = intparm & VIRTIO_CCW_INTPARM_MASK;

	mutex_lock(&vcdev->io_lock);
	do {
		spin_lock_irqsave(get_ccwdev_lock(vcdev->cdev), flags);
		ret = ccw_device_start(vcdev->cdev, ccw, intparm, 0, 0);
		if (!ret) {
			if (!vcdev->curr_io)
				vcdev->err = 0;
			vcdev->curr_io |= flag;
		}
		spin_unlock_irqrestore(get_ccwdev_lock(vcdev->cdev), flags);
		cpu_relax();
	} while (ret == -EBUSY);
	wait_event(vcdev->wait_q, doing_io(vcdev, flag) == 0);
	ret = ret ? ret : vcdev->err;
	mutex_unlock(&vcdev->io_lock);
	return ret;
}
static void virtio_ccw_drop_indicator(struct virtio_ccw_device *vcdev,
				      struct ccw1 *ccw)
{
	int ret;
	unsigned long *indicatorp = NULL;
	struct virtio_thinint_area *thinint_area = NULL;
	struct airq_info *airq_info = vcdev->airq_info;

	if (vcdev->is_thinint) {
		thinint_area = ccw_device_dma_zalloc(vcdev->cdev,
						     sizeof(*thinint_area));
		if (!thinint_area)
			return;
		thinint_area->summary_indicator =
			(unsigned long)get_summary_indicator(airq_info);
		thinint_area->isc = VIRTIO_AIRQ_ISC;
		ccw->cmd_code = CCW_CMD_SET_IND_ADAPTER;
		ccw->count = sizeof(*thinint_area);
		ccw->cda = (__u32)(unsigned long)thinint_area;
	} else {
		/* payload is the address of the indicators */
		indicatorp = ccw_device_dma_zalloc(vcdev->cdev,
						   sizeof(indicators(vcdev)));
		if (!indicatorp)
			return;
		*indicatorp = 0;
		ccw->cmd_code = CCW_CMD_SET_IND;
		ccw->count = sizeof(indicators(vcdev));
		ccw->cda = (__u32)(unsigned long)indicatorp;
	}
	/* Deregister indicators from host. */
	*indicators(vcdev) = 0;
	ccw->flags = 0;
	ret = ccw_io_helper(vcdev, ccw,
			    vcdev->is_thinint ?
			    VIRTIO_CCW_DOING_SET_IND_ADAPTER :
			    VIRTIO_CCW_DOING_SET_IND);
	if (ret && (ret != -ENODEV))
		dev_info(&vcdev->cdev->dev,
			 "Failed to deregister indicators (%d)\n", ret);
	else if (vcdev->is_thinint)
		virtio_ccw_drop_indicators(vcdev);
	ccw_device_dma_free(vcdev->cdev, indicatorp, sizeof(indicators(vcdev)));
	ccw_device_dma_free(vcdev->cdev, thinint_area, sizeof(*thinint_area));
}
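
/*
 * Notify the host about activity on a virtqueue via the diagnose 0x500
 * hypercall (KVM_S390_VIRTIO_CCW_NOTIFY), passing the subchannel id, the
 * queue index and the cookie returned by the previous notification.
 */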
static inline long __do_kvm_notify(struct subchannel_id schid,
				   unsigned long queue_index,
				   long cookie)
{
	register unsigned long __nr asm("1") = KVM_S390_VIRTIO_CCW_NOTIFY;
	register struct subchannel_id __schid asm("2") = schid;
	register unsigned long __index asm("3") = queue_index;
	register long __rc asm("2");
	register long __cookie asm("4") = cookie;

	asm volatile ("diag 2,4,0x500\n"
		      : "=d" (__rc) : "d" (__nr), "d" (__schid), "d" (__index),
		      "d" (__cookie)
		      : "memory", "cc");
	return __rc;
}

static inline long do_kvm_notify(struct subchannel_id schid,
				 unsigned long queue_index,
				 long cookie)
{
	diag_stat_inc(DIAG_STAT_X500);
	return __do_kvm_notify(schid, queue_index, cookie);
}

static bool virtio_ccw_kvm_notify(struct virtqueue *vq)
{
	struct virtio_ccw_vq_info *info = vq->priv;
	struct virtio_ccw_device *vcdev;
	struct subchannel_id schid;

	vcdev = to_vc_device(info->vq->vdev);
	ccw_device_get_schid(vcdev->cdev, &schid);
	info->cookie = do_kvm_notify(schid, vq->index, info->cookie);
	if (info->cookie < 0)
		return false;
	return true;
}
static int virtio_ccw_read_vq_conf(struct virtio_ccw_device *vcdev,
				   struct ccw1 *ccw, int index)
{
	int ret;

	vcdev->dma_area->config_block.index = index;
	ccw->cmd_code = CCW_CMD_READ_VQ_CONF;
	ccw->flags = 0;
	ccw->count = sizeof(struct vq_config_block);
	ccw->cda = (__u32)(unsigned long)(&vcdev->dma_area->config_block);
	ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_VQ_CONF);
	if (ret)
		return ret;
	return vcdev->dma_area->config_block.num ?: -ENOENT;
}
static void virtio_ccw_del_vq(struct virtqueue *vq, struct ccw1 *ccw)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vq->vdev);
	struct virtio_ccw_vq_info *info = vq->priv;
	unsigned long flags;
	int ret;
	unsigned int index = vq->index;

	/* Remove from our list. */
	spin_lock_irqsave(&vcdev->lock, flags);
	list_del(&info->node);
	spin_unlock_irqrestore(&vcdev->lock, flags);

	/* Release from host. */
	if (vcdev->revision == 0) {
		info->info_block->l.queue = 0;
		info->info_block->l.align = 0;
		info->info_block->l.index = index;
		info->info_block->l.num = 0;
		ccw->count = sizeof(info->info_block->l);
	} else {
		info->info_block->s.desc = 0;
		info->info_block->s.index = index;
		info->info_block->s.num = 0;
		info->info_block->s.avail = 0;
		info->info_block->s.used = 0;
		ccw->count = sizeof(info->info_block->s);
	}
	ccw->cmd_code = CCW_CMD_SET_VQ;
	ccw->flags = 0;
	ccw->cda = (__u32)(unsigned long)(info->info_block);
	ret = ccw_io_helper(vcdev, ccw,
			    VIRTIO_CCW_DOING_SET_VQ | index);
	/*
	 * -ENODEV isn't considered an error: The device is gone anyway.
	 * This may happen on device detach.
	 */
	if (ret && (ret != -ENODEV))
		dev_warn(&vq->vdev->dev, "Error %d while deleting queue %d\n",
			 ret, index);

	vring_del_virtqueue(vq);
	ccw_device_dma_free(vcdev->cdev, info->info_block,
			    sizeof(*info->info_block));
	kfree(info);
}

static void virtio_ccw_del_vqs(struct virtio_device *vdev)
{
	struct virtqueue *vq, *n;
	struct ccw1 *ccw;
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);

	ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
	if (!ccw)
		return;

	virtio_ccw_drop_indicator(vcdev, ccw);

	list_for_each_entry_safe(vq, n, &vdev->vqs, list)
		virtio_ccw_del_vq(vq, ccw);

	ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
}
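
/*
 * Set up a single virtqueue: query the queue size via READ_VQ_CONF,
 * allocate the vring, and register its addresses with the host through
 * a SET_VQ channel command (legacy or revision 1 format, depending on
 * the negotiated transport revision).
 */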
static struct virtqueue *virtio_ccw_setup_vq(struct virtio_device *vdev,
					     int i, vq_callback_t *callback,
					     const char *name, bool ctx,
					     struct ccw1 *ccw)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
	int err;
	struct virtqueue *vq = NULL;
	struct virtio_ccw_vq_info *info;
	u64 queue;
	unsigned long flags;
	bool may_reduce;

	/* Allocate queue. */
	info = kzalloc(sizeof(struct virtio_ccw_vq_info), GFP_KERNEL);
	if (!info) {
		dev_warn(&vcdev->cdev->dev, "no info\n");
		err = -ENOMEM;
		goto out_err;
	}
	info->info_block = ccw_device_dma_zalloc(vcdev->cdev,
						 sizeof(*info->info_block));
	if (!info->info_block) {
		dev_warn(&vcdev->cdev->dev, "no info block\n");
		err = -ENOMEM;
		goto out_err;
	}
	info->num = virtio_ccw_read_vq_conf(vcdev, ccw, i);
	if (info->num < 0) {
		err = info->num;
		goto out_err;
	}
	may_reduce = vcdev->revision > 0;
	vq = vring_create_virtqueue(i, info->num, KVM_VIRTIO_CCW_RING_ALIGN,
				    vdev, true, may_reduce, ctx,
				    virtio_ccw_kvm_notify, callback, name);

	if (!vq) {
		/* For now, we fail if we can't get the requested size. */
		dev_warn(&vcdev->cdev->dev, "no vq\n");
		err = -ENOMEM;
		goto out_err;
	}

	/* it may have been reduced */
	info->num = virtqueue_get_vring_size(vq);

	/* Register it with the host. */
	queue = virtqueue_get_desc_addr(vq);
	if (vcdev->revision == 0) {
		info->info_block->l.queue = queue;
		info->info_block->l.align = KVM_VIRTIO_CCW_RING_ALIGN;
		info->info_block->l.index = i;
		info->info_block->l.num = info->num;
		ccw->count = sizeof(info->info_block->l);
	} else {
		info->info_block->s.desc = queue;
		info->info_block->s.index = i;
		info->info_block->s.num = info->num;
		info->info_block->s.avail = (__u64)virtqueue_get_avail_addr(vq);
		info->info_block->s.used = (__u64)virtqueue_get_used_addr(vq);
		ccw->count = sizeof(info->info_block->s);
	}
	ccw->cmd_code = CCW_CMD_SET_VQ;
	ccw->flags = 0;
	ccw->cda = (__u32)(unsigned long)(info->info_block);
	err = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_VQ | i);
	if (err) {
		dev_warn(&vcdev->cdev->dev, "SET_VQ failed\n");
		goto out_err;
	}

	info->vq = vq;
	vq->priv = info;

	/* Save it to our list. */
	spin_lock_irqsave(&vcdev->lock, flags);
	list_add(&info->node, &vcdev->virtqueues);
	spin_unlock_irqrestore(&vcdev->lock, flags);

	return vq;

out_err:
	if (vq)
		vring_del_virtqueue(vq);
	if (info) {
		ccw_device_dma_free(vcdev->cdev, info->info_block,
				    sizeof(*info->info_block));
	}
	kfree(info);
	return ERR_PTR(err);
}
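
/*
 * Try to switch the device to adapter (thin) interrupts: grab indicator
 * bits for all virtqueues and register them with the host via
 * SET_IND_ADAPTER. On -EOPNOTSUPP, adapter interrupts are disabled
 * globally and the caller falls back to classic indicators.
 */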
static int virtio_ccw_register_adapter_ind(struct virtio_ccw_device *vcdev,
					   struct virtqueue *vqs[], int nvqs,
					   struct ccw1 *ccw)
{
	int ret;
	struct virtio_thinint_area *thinint_area = NULL;
	struct airq_info *info;

	thinint_area = ccw_device_dma_zalloc(vcdev->cdev,
					     sizeof(*thinint_area));
	if (!thinint_area) {
		ret = -ENOMEM;
		goto out;
	}
	/* Try to get an indicator. */
	thinint_area->indicator = get_airq_indicator(vqs, nvqs,
						     &thinint_area->bit_nr,
						     &vcdev->airq_info);
	if (!thinint_area->indicator) {
		ret = -ENOSPC;
		goto out;
	}
	info = vcdev->airq_info;
	thinint_area->summary_indicator =
		(unsigned long)get_summary_indicator(info);
	thinint_area->isc = VIRTIO_AIRQ_ISC;
	ccw->cmd_code = CCW_CMD_SET_IND_ADAPTER;
	ccw->flags = CCW_FLAG_SLI;
	ccw->count = sizeof(*thinint_area);
	ccw->cda = (__u32)(unsigned long)thinint_area;
	ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_IND_ADAPTER);
	if (ret) {
		if (ret == -EOPNOTSUPP) {
			/*
			 * The host does not support adapter interrupts
			 * for virtio-ccw, stop trying.
			 */
			virtio_ccw_use_airq = 0;
			pr_info("Adapter interrupts unsupported on host\n");
		} else
			dev_warn(&vcdev->cdev->dev,
				 "enabling adapter interrupts = %d\n", ret);
		virtio_ccw_drop_indicators(vcdev);
	}
out:
	ccw_device_dma_free(vcdev->cdev, thinint_area, sizeof(*thinint_area));
	return ret;
}
static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
			       struct virtqueue *vqs[],
			       vq_callback_t *callbacks[],
			       const char * const names[],
			       const bool *ctx,
			       struct irq_affinity *desc)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
	unsigned long *indicatorp = NULL;
	int ret, i, queue_idx = 0;
	struct ccw1 *ccw;

	ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
	if (!ccw)
		return -ENOMEM;

	for (i = 0; i < nvqs; ++i) {
		if (!names[i]) {
			vqs[i] = NULL;
			continue;
		}

		vqs[i] = virtio_ccw_setup_vq(vdev, queue_idx++, callbacks[i],
					     names[i], ctx ? ctx[i] : false,
					     ccw);
		if (IS_ERR(vqs[i])) {
			ret = PTR_ERR(vqs[i]);
			vqs[i] = NULL;
			goto out;
		}
	}
	ret = -ENOMEM;
	/*
	 * We need a data area under 2G to communicate. Our payload is
	 * the address of the indicators.
	 */
	indicatorp = ccw_device_dma_zalloc(vcdev->cdev,
					   sizeof(indicators(vcdev)));
	if (!indicatorp)
		goto out;
	*indicatorp = (unsigned long) indicators(vcdev);
	if (vcdev->is_thinint) {
		ret = virtio_ccw_register_adapter_ind(vcdev, vqs, nvqs, ccw);
		if (ret)
			/* no error, just fall back to legacy interrupts */
			vcdev->is_thinint = false;
	}
	if (!vcdev->is_thinint) {
		/* Register queue indicators with host. */
		*indicators(vcdev) = 0;
		ccw->cmd_code = CCW_CMD_SET_IND;
		ccw->flags = 0;
		ccw->count = sizeof(indicators(vcdev));
		ccw->cda = (__u32)(unsigned long) indicatorp;
		ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_IND);
		if (ret)
			goto out;
	}
	/* Register indicators2 with host for config changes */
	*indicatorp = (unsigned long) indicators2(vcdev);
	*indicators2(vcdev) = 0;
	ccw->cmd_code = CCW_CMD_SET_CONF_IND;
	ccw->flags = 0;
	ccw->count = sizeof(indicators2(vcdev));
	ccw->cda = (__u32)(unsigned long) indicatorp;
	ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_CONF_IND);
	if (ret)
		goto out;

	if (indicatorp)
		ccw_device_dma_free(vcdev->cdev, indicatorp,
				    sizeof(indicators(vcdev)));
	ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
	return 0;
out:
	if (indicatorp)
		ccw_device_dma_free(vcdev->cdev, indicatorp,
				    sizeof(indicators(vcdev)));
	ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
	virtio_ccw_del_vqs(vdev);
	return ret;
}
static void virtio_ccw_reset(struct virtio_device *vdev)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
	struct ccw1 *ccw;

	ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
	if (!ccw)
		return;

	/* Zero status bits. */
	vcdev->dma_area->status = 0;

	/* Send a reset ccw on device. */
	ccw->cmd_code = CCW_CMD_VDEV_RESET;
	ccw->flags = 0;
	ccw->count = 0;
	ccw->cda = 0;
	ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_RESET);
	ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
}
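
/*
 * Read the device feature bits: one READ_FEAT for the low 32 bits and,
 * for revision 1 and later devices, a second one for the high 32 bits.
 */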
static u64 virtio_ccw_get_features(struct virtio_device *vdev)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
	struct virtio_feature_desc *features;
	int ret;
	u64 rc;
	struct ccw1 *ccw;

	ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
	if (!ccw)
		return 0;

	features = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*features));
	if (!features) {
		rc = 0;
		goto out_free;
	}
	/* Read the feature bits from the host. */
	features->index = 0;
	ccw->cmd_code = CCW_CMD_READ_FEAT;
	ccw->flags = 0;
	ccw->count = sizeof(*features);
	ccw->cda = (__u32)(unsigned long)features;
	ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_FEAT);
	if (ret) {
		rc = 0;
		goto out_free;
	}

	rc = le32_to_cpu(features->features);

	if (vcdev->revision == 0)
		goto out_free;

	/* Read second half of the feature bits from the host. */
	features->index = 1;
	ccw->cmd_code = CCW_CMD_READ_FEAT;
	ccw->flags = 0;
	ccw->count = sizeof(*features);
	ccw->cda = (__u32)(unsigned long)features;
	ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_FEAT);
	if (ret == 0)
		rc |= (u64)le32_to_cpu(features->features) << 32;

out_free:
	ccw_device_dma_free(vcdev->cdev, features, sizeof(*features));
	ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
	return rc;
}
static void ccw_transport_features(struct virtio_device *vdev)
{
	/*
	 * Currently nothing to do here.
	 */
}

static int virtio_ccw_finalize_features(struct virtio_device *vdev)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
	struct virtio_feature_desc *features;
	struct ccw1 *ccw;
	int ret;

	if (vcdev->revision >= 1 &&
	    !__virtio_test_bit(vdev, VIRTIO_F_VERSION_1)) {
		dev_err(&vdev->dev, "virtio: device uses revision 1 "
			"but does not have VIRTIO_F_VERSION_1\n");
		return -EINVAL;
	}

	ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
	if (!ccw)
		return -ENOMEM;

	features = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*features));
	if (!features) {
		ret = -ENOMEM;
		goto out_free;
	}
	/* Give virtio_ring a chance to accept features. */
	vring_transport_features(vdev);

	/* Give virtio_ccw a chance to accept features. */
	ccw_transport_features(vdev);

	features->index = 0;
	features->features = cpu_to_le32((u32)vdev->features);
	/* Write the first half of the feature bits to the host. */
	ccw->cmd_code = CCW_CMD_WRITE_FEAT;
	ccw->flags = 0;
	ccw->count = sizeof(*features);
	ccw->cda = (__u32)(unsigned long)features;
	ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_FEAT);
	if (ret)
		goto out_free;

	if (vcdev->revision == 0)
		goto out_free;

	features->index = 1;
	features->features = cpu_to_le32(vdev->features >> 32);
	/* Write the second half of the feature bits to the host. */
	ccw->cmd_code = CCW_CMD_WRITE_FEAT;
	ccw->flags = 0;
	ccw->count = sizeof(*features);
	ccw->cda = (__u32)(unsigned long)features;
	ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_FEAT);

out_free:
	ccw_device_dma_free(vcdev->cdev, features, sizeof(*features));
	ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));

	return ret;
}
static void virtio_ccw_get_config(struct virtio_device *vdev,
				  unsigned int offset, void *buf, unsigned len)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
	int ret;
	struct ccw1 *ccw;
	void *config_area;
	unsigned long flags;

	ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
	if (!ccw)
		return;

	config_area = ccw_device_dma_zalloc(vcdev->cdev,
					    VIRTIO_CCW_CONFIG_SIZE);
	if (!config_area)
		goto out_free;

	/* Read the config area from the host. */
	ccw->cmd_code = CCW_CMD_READ_CONF;
	ccw->flags = 0;
	ccw->count = offset + len;
	ccw->cda = (__u32)(unsigned long)config_area;
	ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_CONFIG);
	if (ret)
		goto out_free;

	spin_lock_irqsave(&vcdev->lock, flags);
	memcpy(vcdev->config, config_area, offset + len);
	if (vcdev->config_ready < offset + len)
		vcdev->config_ready = offset + len;
	spin_unlock_irqrestore(&vcdev->lock, flags);
	if (buf)
		memcpy(buf, config_area + offset, len);

out_free:
	ccw_device_dma_free(vcdev->cdev, config_area, VIRTIO_CCW_CONFIG_SIZE);
	ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
}
static void virtio_ccw_set_config(struct virtio_device *vdev,
				  unsigned int offset, const void *buf,
				  unsigned len)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
	struct ccw1 *ccw;
	void *config_area;
	unsigned long flags;

	ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
	if (!ccw)
		return;

	config_area = ccw_device_dma_zalloc(vcdev->cdev,
					    VIRTIO_CCW_CONFIG_SIZE);
	if (!config_area)
		goto out_free;

	/* Make sure we don't overwrite fields. */
	if (vcdev->config_ready < offset)
		virtio_ccw_get_config(vdev, 0, NULL, offset);
	spin_lock_irqsave(&vcdev->lock, flags);
	memcpy(&vcdev->config[offset], buf, len);
	/* Write the config area to the host. */
	memcpy(config_area, vcdev->config, sizeof(vcdev->config));
	spin_unlock_irqrestore(&vcdev->lock, flags);
	ccw->cmd_code = CCW_CMD_WRITE_CONF;
	ccw->flags = 0;
	ccw->count = offset + len;
	ccw->cda = (__u32)(unsigned long)config_area;
	ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_CONFIG);

out_free:
	ccw_device_dma_free(vcdev->cdev, config_area, VIRTIO_CCW_CONFIG_SIZE);
	ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
}
static u8 virtio_ccw_get_status(struct virtio_device *vdev)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
	u8 old_status = vcdev->dma_area->status;
	struct ccw1 *ccw;

	if (vcdev->revision < 1)
		return vcdev->dma_area->status;

	ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
	if (!ccw)
		return old_status;

	ccw->cmd_code = CCW_CMD_READ_STATUS;
	ccw->flags = 0;
	ccw->count = sizeof(vcdev->dma_area->status);
	ccw->cda = (__u32)(unsigned long)&vcdev->dma_area->status;
	ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_STATUS);
	/*
	 * If the channel program failed (should only happen if the device
	 * was hotunplugged, and then we clean up via the machine check
	 * handler anyway), vcdev->dma_area->status was not overwritten and
	 * we just return the old status, which is fine.
	 */
	ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));

	return vcdev->dma_area->status;
}

static void virtio_ccw_set_status(struct virtio_device *vdev, u8 status)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
	u8 old_status = vcdev->dma_area->status;
	struct ccw1 *ccw;
	int ret;

	ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
	if (!ccw)
		return;

	/* Write the status to the host. */
	vcdev->dma_area->status = status;
	ccw->cmd_code = CCW_CMD_WRITE_STATUS;
	ccw->flags = 0;
	ccw->count = sizeof(status);
	ccw->cda = (__u32)(unsigned long)&vcdev->dma_area->status;
	ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_STATUS);
	/* Write failed? We assume status is unchanged. */
	if (ret)
		vcdev->dma_area->status = old_status;
	ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
}
static const char *virtio_ccw_bus_name(struct virtio_device *vdev)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);

	return dev_name(&vcdev->cdev->dev);
}

static const struct virtio_config_ops virtio_ccw_config_ops = {
	.get_features = virtio_ccw_get_features,
	.finalize_features = virtio_ccw_finalize_features,
	.get = virtio_ccw_get_config,
	.set = virtio_ccw_set_config,
	.get_status = virtio_ccw_get_status,
	.set_status = virtio_ccw_set_status,
	.reset = virtio_ccw_reset,
	.find_vqs = virtio_ccw_find_vqs,
	.del_vqs = virtio_ccw_del_vqs,
	.bus_name = virtio_ccw_bus_name,
};
/*
 * ccw bus driver related functions
 */

static void virtio_ccw_release_dev(struct device *_d)
{
	struct virtio_device *dev = dev_to_virtio(_d);
	struct virtio_ccw_device *vcdev = to_vc_device(dev);

	ccw_device_dma_free(vcdev->cdev, vcdev->dma_area,
			    sizeof(*vcdev->dma_area));
	kfree(vcdev);
}

static int irb_is_error(struct irb *irb)
{
	if (scsw_cstat(&irb->scsw) != 0)
		return 1;
	if (scsw_dstat(&irb->scsw) & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END))
		return 1;
	if (scsw_cc(&irb->scsw) != 0)
		return 1;
	return 0;
}

static struct virtqueue *virtio_ccw_vq_by_ind(struct virtio_ccw_device *vcdev,
					      int index)
{
	struct virtio_ccw_vq_info *info;
	unsigned long flags;
	struct virtqueue *vq;

	vq = NULL;
	spin_lock_irqsave(&vcdev->lock, flags);
	list_for_each_entry(info, &vcdev->virtqueues, node) {
		if (info->vq->index == index) {
			vq = info->vq;
			break;
		}
	}
	spin_unlock_irqrestore(&vcdev->lock, flags);
	return vq;
}
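
/*
 * Clear the in-flight flag for the channel command that just completed
 * and wake up the waiter in ccw_io_helper().
 */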
static void virtio_ccw_check_activity(struct virtio_ccw_device *vcdev,
				      __u32 activity)
{
	if (vcdev->curr_io & activity) {
		switch (activity) {
		case VIRTIO_CCW_DOING_READ_FEAT:
		case VIRTIO_CCW_DOING_WRITE_FEAT:
		case VIRTIO_CCW_DOING_READ_CONFIG:
		case VIRTIO_CCW_DOING_WRITE_CONFIG:
		case VIRTIO_CCW_DOING_WRITE_STATUS:
		case VIRTIO_CCW_DOING_READ_STATUS:
		case VIRTIO_CCW_DOING_SET_VQ:
		case VIRTIO_CCW_DOING_SET_IND:
		case VIRTIO_CCW_DOING_SET_CONF_IND:
		case VIRTIO_CCW_DOING_RESET:
		case VIRTIO_CCW_DOING_READ_VQ_CONF:
		case VIRTIO_CCW_DOING_SET_IND_ADAPTER:
		case VIRTIO_CCW_DOING_SET_VIRTIO_REV:
			vcdev->curr_io &= ~activity;
			wake_up(&vcdev->wait_q);
			break;
		default:
			/* don't know what to do... */
			dev_warn(&vcdev->cdev->dev,
				 "Suspicious activity '%08x'\n", activity);
			WARN_ON(1);
			break;
		}
	}
}

static void virtio_ccw_int_handler(struct ccw_device *cdev,
				   unsigned long intparm,
				   struct irb *irb)
{
	__u32 activity = intparm & VIRTIO_CCW_INTPARM_MASK;
	struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev);
	int i;
	struct virtqueue *vq;

	if (!vcdev)
		return;
	if (IS_ERR(irb)) {
		vcdev->err = PTR_ERR(irb);
		virtio_ccw_check_activity(vcdev, activity);
		/* Don't poke around indicators, something's wrong. */
		return;
	}
	/* Check if it's a notification from the host. */
	if ((intparm == 0) &&
	    (scsw_stctl(&irb->scsw) ==
	     (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))) {
		/* OK */
	}
	if (irb_is_error(irb)) {
		/* Command reject? */
		if ((scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK) &&
		    (irb->ecw[0] & SNS0_CMD_REJECT))
			vcdev->err = -EOPNOTSUPP;
		else
			/* Map everything else to -EIO. */
			vcdev->err = -EIO;
	}
	virtio_ccw_check_activity(vcdev, activity);
	for_each_set_bit(i, indicators(vcdev),
			 sizeof(*indicators(vcdev)) * BITS_PER_BYTE) {
		/* The bit clear must happen before the vring kick. */
		clear_bit(i, indicators(vcdev));
		barrier();
		vq = virtio_ccw_vq_by_ind(vcdev, i);
		vring_interrupt(0, vq);
	}
	if (test_bit(0, indicators2(vcdev))) {
		virtio_config_changed(&vcdev->vdev);
		clear_bit(0, indicators2(vcdev));
	}
}
/*
 * We usually want to autoonline all devices, but give the admin
 * a way to exempt devices from this.
 */
#define __DEV_WORDS ((__MAX_SUBCHANNEL + (8*sizeof(long) - 1)) / \
		     (8*sizeof(long)))
static unsigned long devs_no_auto[__MAX_SSID + 1][__DEV_WORDS];

static char *no_auto = "";

module_param(no_auto, charp, 0444);
MODULE_PARM_DESC(no_auto, "list of ccw bus id ranges not to be auto-onlined");
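
/*
 * Example with illustrative bus ids: booting with
 *   virtio_ccw.no_auto=0.0.0042,0.1.1000-0.1.10ff
 * keeps device 0.0.0042 and the range 0.1.1000-0.1.10ff from being
 * auto-onlined (format cssid.ssid.devno, ranges separated by '-',
 * entries separated by ',', as parsed by no_auto_parse() below).
 */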
static int virtio_ccw_check_autoonline(struct ccw_device *cdev)
{
	struct ccw_dev_id id;

	ccw_device_get_id(cdev, &id);
	if (test_bit(id.devno, devs_no_auto[id.ssid]))
		return 0;
	return 1;
}

static void virtio_ccw_auto_online(void *data, async_cookie_t cookie)
{
	struct ccw_device *cdev = data;
	int ret;

	ret = ccw_device_set_online(cdev);
	if (ret)
		dev_warn(&cdev->dev, "Failed to set online: %d\n", ret);
}

static int virtio_ccw_probe(struct ccw_device *cdev)
{
	cdev->handler = virtio_ccw_int_handler;
	if (virtio_ccw_check_autoonline(cdev))
		async_schedule(virtio_ccw_auto_online, cdev);
	return 0;
}
static struct virtio_ccw_device *virtio_grab_drvdata(struct ccw_device *cdev)
{
	unsigned long flags;
	struct virtio_ccw_device *vcdev;

	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
	vcdev = dev_get_drvdata(&cdev->dev);
	if (!vcdev || vcdev->going_away) {
		spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
		return NULL;
	}
	vcdev->going_away = true;
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	return vcdev;
}

static void virtio_ccw_remove(struct ccw_device *cdev)
{
	unsigned long flags;
	struct virtio_ccw_device *vcdev = virtio_grab_drvdata(cdev);

	if (vcdev && cdev->online) {
		if (vcdev->device_lost)
			virtio_break_device(&vcdev->vdev);
		unregister_virtio_device(&vcdev->vdev);
		spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
		dev_set_drvdata(&cdev->dev, NULL);
		spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	}
	cdev->handler = NULL;
}

static int virtio_ccw_offline(struct ccw_device *cdev)
{
	unsigned long flags;
	struct virtio_ccw_device *vcdev = virtio_grab_drvdata(cdev);

	if (!vcdev)
		return 0;
	if (vcdev->device_lost)
		virtio_break_device(&vcdev->vdev);
	unregister_virtio_device(&vcdev->vdev);
	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
	dev_set_drvdata(&cdev->dev, NULL);
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	return 0;
}
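
/*
 * Negotiate the virtio-ccw transport revision: start at the highest
 * revision we support and step down on -EOPNOTSUPP until the host
 * accepts one; revision 0 means legacy operation.
 */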
static int virtio_ccw_set_transport_rev(struct virtio_ccw_device *vcdev)
{
	struct virtio_rev_info *rev;
	struct ccw1 *ccw;
	int ret;

	ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
	if (!ccw)
		return -ENOMEM;
	rev = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*rev));
	if (!rev) {
		ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
		return -ENOMEM;
	}

	/* Set transport revision */
	ccw->cmd_code = CCW_CMD_SET_VIRTIO_REV;
	ccw->flags = 0;
	ccw->count = sizeof(*rev);
	ccw->cda = (__u32)(unsigned long)rev;

	vcdev->revision = VIRTIO_CCW_REV_MAX;
	do {
		rev->revision = vcdev->revision;
		/* none of our supported revisions carry payload */
		rev->length = 0;
		ret = ccw_io_helper(vcdev, ccw,
				    VIRTIO_CCW_DOING_SET_VIRTIO_REV);
		if (ret == -EOPNOTSUPP) {
			if (vcdev->revision == 0)
				/*
				 * The host device does not support setting
				 * the revision: let's operate it in legacy
				 * mode.
				 */
				ret = 0;
			else
				vcdev->revision--;
		}
	} while (ret == -EOPNOTSUPP);

	ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
	ccw_device_dma_free(vcdev->cdev, rev, sizeof(*rev));
	return ret;
}
static int virtio_ccw_online(struct ccw_device *cdev)
{
	int ret;
	struct virtio_ccw_device *vcdev;
	unsigned long flags;

	vcdev = kzalloc(sizeof(*vcdev), GFP_KERNEL);
	if (!vcdev) {
		dev_warn(&cdev->dev, "Could not get memory for virtio\n");
		ret = -ENOMEM;
		goto out_free;
	}
	vcdev->vdev.dev.parent = &cdev->dev;
	vcdev->cdev = cdev;
	vcdev->dma_area = ccw_device_dma_zalloc(vcdev->cdev,
						sizeof(*vcdev->dma_area));
	if (!vcdev->dma_area) {
		ret = -ENOMEM;
		goto out_free;
	}

	vcdev->is_thinint = virtio_ccw_use_airq; /* at least try */

	vcdev->vdev.dev.release = virtio_ccw_release_dev;
	vcdev->vdev.config = &virtio_ccw_config_ops;
	init_waitqueue_head(&vcdev->wait_q);
	INIT_LIST_HEAD(&vcdev->virtqueues);
	spin_lock_init(&vcdev->lock);
	mutex_init(&vcdev->io_lock);

	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
	dev_set_drvdata(&cdev->dev, vcdev);
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	vcdev->vdev.id.vendor = cdev->id.cu_type;
	vcdev->vdev.id.device = cdev->id.cu_model;

	ret = virtio_ccw_set_transport_rev(vcdev);
	if (ret)
		goto out_free;

	ret = register_virtio_device(&vcdev->vdev);
	if (ret) {
		dev_warn(&cdev->dev, "Failed to register virtio device: %d\n",
			 ret);
		goto out_put;
	}
	return 0;
out_put:
	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
	dev_set_drvdata(&cdev->dev, NULL);
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	put_device(&vcdev->vdev.dev);
	return ret;
out_free:
	if (vcdev) {
		ccw_device_dma_free(vcdev->cdev, vcdev->dma_area,
				    sizeof(*vcdev->dma_area));
	}
	kfree(vcdev);
	return ret;
}
static int virtio_ccw_cio_notify(struct ccw_device *cdev, int event)
{
	int rc;
	struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev);

	/*
	 * Make sure vcdev is set
	 * i.e. set_offline/remove callback not already running
	 */
	if (!vcdev)
		return NOTIFY_DONE;

	switch (event) {
	case CIO_GONE:
		vcdev->device_lost = true;
		rc = NOTIFY_DONE;
		break;
	case CIO_OPER:
		rc = NOTIFY_OK;
		break;
	default:
		rc = NOTIFY_DONE;
		break;
	}
	return rc;
}

static struct ccw_device_id virtio_ids[] = {
	{ CCW_DEVICE(0x3832, 0) },
	{},
};

static struct ccw_driver virtio_ccw_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name = "virtio_ccw",
	},
	.ids = virtio_ids,
	.probe = virtio_ccw_probe,
	.remove = virtio_ccw_remove,
	.set_offline = virtio_ccw_offline,
	.set_online = virtio_ccw_online,
	.notify = virtio_ccw_cio_notify,
	.int_class = IRQIO_VIR,
};
static int __init pure_hex(char **cp, unsigned int *val, int min_digit,
			   int max_digit, int max_val)
{
	int diff;

	diff = 0;
	*val = 0;

	while (diff <= max_digit) {
		int value = hex_to_bin(**cp);

		if (value < 0)
			break;
		*val = *val * 16 + value;
		(*cp)++;
		diff++;
	}

	if ((diff < min_digit) || (diff > max_digit) || (*val > max_val))
		return 1;

	return 0;
}

static int __init parse_busid(char *str, unsigned int *cssid,
			      unsigned int *ssid, unsigned int *devno)
{
	char *str_work;
	int rc, ret;

	rc = 1;

	if (*str == '\0')
		goto out;

	str_work = str;
	ret = pure_hex(&str_work, cssid, 1, 2, __MAX_CSSID);
	if (ret || (str_work[0] != '.'))
		goto out;
	str_work++;
	ret = pure_hex(&str_work, ssid, 1, 1, __MAX_SSID);
	if (ret || (str_work[0] != '.'))
		goto out;
	str_work++;
	ret = pure_hex(&str_work, devno, 4, 4, __MAX_SUBCHANNEL);
	if (ret || (str_work[0] != '\0'))
		goto out;

	rc = 0;
out:
	return rc;
}

static void __init no_auto_parse(void)
{
	unsigned int from_cssid, to_cssid, from_ssid, to_ssid, from, to;
	char *parm, *str;
	int rc;

	str = no_auto;
	while ((parm = strsep(&str, ","))) {
		rc = parse_busid(strsep(&parm, "-"), &from_cssid,
				 &from_ssid, &from);
		if (rc)
			continue;
		if (parm != NULL) {
			rc = parse_busid(parm, &to_cssid,
					 &to_ssid, &to);
			if ((from_ssid > to_ssid) ||
			    ((from_ssid == to_ssid) && (from > to)))
				rc = -EINVAL;
		} else {
			to_cssid = from_cssid;
			to_ssid = from_ssid;
			to = from;
		}
		if (rc)
			continue;
		while ((from_ssid < to_ssid) ||
		       ((from_ssid == to_ssid) && (from <= to))) {
			set_bit(from, devs_no_auto[from_ssid]);
			from++;
			if (from > __MAX_SUBCHANNEL) {
				from_ssid++;
				from = 0;
			}
		}
	}
}

static int __init virtio_ccw_init(void)
{
	int rc;

	/* parse no_auto string before we do anything further */
	no_auto_parse();

	summary_indicators = cio_dma_zalloc(MAX_AIRQ_AREAS);
	if (!summary_indicators)
		return -ENOMEM;
	rc = ccw_driver_register(&virtio_ccw_driver);
	if (rc)
		cio_dma_free(summary_indicators, MAX_AIRQ_AREAS);
	return rc;
}
device_initcall(virtio_ccw_init);