// SPDX-License-Identifier: GPL-2.0
/*
 * Channel path related status regions for vfio_ccw
 *
 * Copyright IBM Corp. 2020
 *
 * Author(s): Farhan Ali <alifm@linux.ibm.com>
 *            Eric Farman <farman@linux.ibm.com>
 */

#include <linux/slab.h>
#include <linux/vfio.h>
#include "vfio_ccw_private.h"
/*
 * Read handler for the SCHIB capability region.
 *
 * Refreshes the cached subchannel information block from the hardware
 * and copies the requested window of it to userspace.
 *
 * Returns the number of bytes copied, -EINVAL if the request runs past
 * the end of the region, -ENODEV if the subchannel state could not be
 * refreshed, or -EFAULT on a failed copy to userspace.
 */
static ssize_t vfio_ccw_schib_region_read(struct vfio_ccw_private *private,
					  char __user *buf, size_t count,
					  loff_t *ppos)
{
	unsigned int i = VFIO_CCW_OFFSET_TO_INDEX(*ppos) - VFIO_CCW_NUM_REGIONS;
	loff_t pos = *ppos & VFIO_CCW_OFFSET_MASK;
	struct ccw_schib_region *region;
	int ret;

	/* Reject reads that extend beyond the region. */
	if (pos + count > sizeof(*region))
		return -EINVAL;

	mutex_lock(&private->io_mutex);
	region = private->region[i].data;

	if (cio_update_schib(private->sch)) {
		/* Hardware no longer reports the subchannel. */
		ret = -ENODEV;
	} else {
		/* Snapshot the fresh SCHIB, then hand the window out. */
		memcpy(region, &private->sch->schib, sizeof(*region));

		if (copy_to_user(buf, (void *)region + pos, count))
			ret = -EFAULT;
		else
			ret = count;
	}

	mutex_unlock(&private->io_mutex);
	return ret;
}
/*
 * The SCHIB region is read-only; any write attempt is rejected.
 */
static ssize_t vfio_ccw_schib_region_write(struct vfio_ccw_private *private,
					   const char __user *buf, size_t count,
					   loff_t *ppos)
{
	return -EINVAL;
}
/*
 * No per-region resources to tear down for the SCHIB region; the region
 * storage itself is owned by the caller of the register function.
 */
static void vfio_ccw_schib_region_release(struct vfio_ccw_private *private,
					  struct vfio_ccw_region *region)
{
}
/* Operations vector for the read-only SCHIB capability region. */
static const struct vfio_ccw_regops vfio_ccw_schib_region_ops = {
	.read = vfio_ccw_schib_region_read,
	.write = vfio_ccw_schib_region_write,
	.release = vfio_ccw_schib_region_release,
};
int vfio_ccw_register_schib_dev_regions ( struct vfio_ccw_private * private )
{
return vfio_ccw_register_dev_region ( private ,
VFIO_REGION_SUBTYPE_CCW_SCHIB ,
& vfio_ccw_schib_region_ops ,
sizeof ( struct ccw_schib_region ) ,
VFIO_REGION_INFO_FLAG_READ ,
private - > schib_region ) ;
}
/*
 * Read handler for the CRW capability region.
 *
 * Dequeues the oldest pending channel report word (if any), publishes it
 * in the region, and copies the requested window to userspace. CRWs are
 * consume-on-read: the region is cleared after the copy, even when the
 * copy to userspace fails.
 *
 * Returns the number of bytes copied, -EINVAL if the request runs past
 * the end of the region, or -EFAULT on a failed copy to userspace.
 */
static ssize_t vfio_ccw_crw_region_read(struct vfio_ccw_private *private,
					char __user *buf, size_t count,
					loff_t *ppos)
{
	unsigned int i = VFIO_CCW_OFFSET_TO_INDEX(*ppos) - VFIO_CCW_NUM_REGIONS;
	loff_t pos = *ppos & VFIO_CCW_OFFSET_MASK;
	struct ccw_crw_region *region;
	struct vfio_ccw_crw *crw;
	int ret;

	/* Reject reads that extend beyond the region. */
	if (pos + count > sizeof(*region))
		return -EINVAL;

	/*
	 * Pop the oldest queued CRW, if one is pending.
	 * NOTE(review): the list is inspected and modified here without
	 * holding io_mutex or any lock visible in this file — confirm the
	 * producer side is serialized against this path.
	 */
	crw = list_first_entry_or_null(&private->crw,
				       struct vfio_ccw_crw, next);
	if (crw)
		list_del(&crw->next);

	mutex_lock(&private->io_mutex);
	region = private->region[i].data;

	if (crw)
		memcpy(&region->crw, &crw->crw, sizeof(region->crw));

	if (copy_to_user(buf, (void *)region + pos, count))
		ret = -EFAULT;
	else
		ret = count;

	/* Consume-on-read: clear the region even if the copy failed. */
	region->crw = 0;

	mutex_unlock(&private->io_mutex);

	/* kfree(NULL) is a no-op, so an empty queue needs no check. */
	kfree(crw);

	/* Notify the guest if more CRWs are on our queue */
	if (!list_empty(&private->crw) && private->crw_trigger)
		eventfd_signal(private->crw_trigger, 1);

	return ret;
}
/*
 * The CRW region is read-only; any write attempt is rejected.
 */
static ssize_t vfio_ccw_crw_region_write(struct vfio_ccw_private *private,
					 const char __user *buf, size_t count,
					 loff_t *ppos)
{
	return -EINVAL;
}
/*
 * No per-region resources to tear down for the CRW region; the region
 * storage itself is owned by the caller of the register function.
 */
static void vfio_ccw_crw_region_release(struct vfio_ccw_private *private,
					struct vfio_ccw_region *region)
{
}
/* Operations vector for the read-only CRW capability region. */
static const struct vfio_ccw_regops vfio_ccw_crw_region_ops = {
	.read = vfio_ccw_crw_region_read,
	.write = vfio_ccw_crw_region_write,
	.release = vfio_ccw_crw_region_release,
};
int vfio_ccw_register_crw_dev_regions ( struct vfio_ccw_private * private )
{
return vfio_ccw_register_dev_region ( private ,
VFIO_REGION_SUBTYPE_CCW_CRW ,
& vfio_ccw_crw_region_ops ,
sizeof ( struct ccw_crw_region ) ,
VFIO_REGION_INFO_FLAG_READ ,
private - > crw_region ) ;
}