// SPDX-License-Identifier: GPL-2.0+
/*
 * Linux on zSeries Channel Measurement Facility support
 *
 * Copyright IBM Corp. 2000, 2006
 *
 * Authors: Arnd Bergmann <arndb@de.ibm.com>
 *	    Cornelia Huck <cornelia.huck@de.ibm.com>
 *
 * original idea from Natarajan Krishnaswami <nkrishna@us.ibm.com>
 */
2008-12-25 15:39:36 +03:00
# define KMSG_COMPONENT "cio"
# define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
2018-10-31 01:09:49 +03:00
# include <linux/memblock.h>
2005-04-17 02:20:36 +04:00
# include <linux/device.h>
# include <linux/init.h>
# include <linux/list.h>
2016-10-30 23:37:24 +03:00
# include <linux/export.h>
2005-04-17 02:20:36 +04:00
# include <linux/moduleparam.h>
2005-10-31 02:03:48 +03:00
# include <linux/slab.h>
2013-01-30 12:49:40 +04:00
# include <linux/timex.h> /* get_tod_clock() */
2005-04-17 02:20:36 +04:00
# include <asm/ccwdev.h>
# include <asm/cio.h>
# include <asm/cmb.h>
2005-10-31 02:03:48 +03:00
# include <asm/div64.h>
2005-04-17 02:20:36 +04:00
# include "cio.h"
# include "css.h"
# include "device.h"
# include "ioasm.h"
# include "chsc.h"
2007-10-12 18:11:15 +04:00
/*
* parameter to enable cmf during boot , possible uses are :
2005-04-17 02:20:36 +04:00
* " s390cmf " - - enable cmf and allocate 2 MB of ram so measuring can be
* used on any subchannel
* " s390cmf=<num> " - - enable cmf and allocate enough memory to measure
* < num > subchannel , where < num > is an integer
* between 1 and 65535 , default is 1024
*/
# define ARGSTRING "s390cmf"
/* indices for READCMB */
enum cmb_index {
	/* pseudo index: report average utilization instead of a raw field */
	avg_utilization = -1,
	/* basic and extended format: */
	cmb_ssch_rsch_count = 0,
	cmb_sample_count,
	cmb_device_connect_time,
	cmb_function_pending_time,
	cmb_device_disconnect_time,
	cmb_control_unit_queuing_time,
	cmb_device_active_only_time,
	/* extended format only: */
	cmb_device_busy_time,
	cmb_initial_command_response_time,
};
/**
 * enum cmb_format - types of supported measurement block formats
 *
 * @CMF_BASIC:      traditional channel measurement blocks supported
 *		    by all machines that we run on
 * @CMF_EXTENDED:   improved format that was introduced with the z990
 *		    machine
 * @CMF_AUTODETECT: default: use extended format when running on a machine
 *		    supporting extended format, otherwise fall back to
 *		    basic format
 */
enum cmb_format {
	CMF_BASIC,
	CMF_EXTENDED,
	CMF_AUTODETECT = -1,
};
2007-10-12 18:11:15 +04:00
2007-10-12 18:11:16 +04:00
/*
 * format - actual format for all measurement blocks
 *
 * The format module parameter can be set to a value of 0 (zero)
 * or 1, indicating basic or extended format as described for
 * enum cmb_format.
 */
static int format = CMF_AUTODETECT;
module_param(format, bint, 0444);
2005-04-17 02:20:36 +04:00
/**
 * struct cmb_operations - functions to use depending on cmb_format
 *
 * Most of these functions operate on a struct ccw_device. There is only
 * one instance of struct cmb_operations because the format of the measurement
 * data is guaranteed to be the same for every ccw_device.
 *
 * @alloc:	allocate memory for a channel measurement block,
 *		either with the help of a special pool or with kmalloc
 * @free:	free memory allocated with @alloc
 * @set:	enable or disable measurement
 * @read:	read a measurement entry at an index
 * @readall:	read a measurement block in a common format
 * @reset:	clear the data in the associated measurement block and
 *		reset its time stamp
 */
struct cmb_operations {
	int  (*alloc)(struct ccw_device *);
	void (*free)(struct ccw_device *);
	int  (*set)(struct ccw_device *, u32);
	u64  (*read)(struct ccw_device *, int);
	int  (*readall)(struct ccw_device *, struct cmbdata *);
	void (*reset)(struct ccw_device *);
	/* private: */
	struct attribute_group *attr_group;
};

/* selected at init time depending on the detected measurement format */
static struct cmb_operations *cmbops;
2006-06-29 17:08:41 +04:00
struct cmb_data {
void * hw_block ; /* Pointer to block updated by hardware */
void * last_block ; /* Last changed block copied from hardware block */
int size ; /* Size of hw_block and last_block */
unsigned long long last_update ; /* when last_block was updated */
} ;
2007-10-12 18:11:15 +04:00
/*
* Our user interface is designed in terms of nanoseconds ,
2005-04-17 02:20:36 +04:00
* while the hardware measures total times in its own
2007-10-12 18:11:15 +04:00
* unit .
*/
2005-04-17 02:20:36 +04:00
static inline u64 time_to_nsec ( u32 value )
{
return ( ( u64 ) value ) * 128000ull ;
}
/*
 * Users are usually interested in average times,
 * not accumulated time.
 * This also helps us with atomicity problems
 * when reading single values.
 */
static inline u64 time_to_avg_nsec(u32 value, u32 count)
{
	u64 sum;

	/* no samples yet, avoid division by 0 */
	if (!count)
		return 0;

	/* value comes in units of 128 usec */
	sum = time_to_nsec(value);
	do_div(sum, count);

	return sum;
}
2016-06-20 15:03:38 +03:00
/* operands for the schm instruction's monitor-mode control */
#define CMF_OFF 0
#define CMF_ON	2

/*
 * Activate or deactivate the channel monitor. When area is NULL,
 * the monitor is deactivated. The channel monitor needs to
 * be active in order to measure subchannels, which also need
 * to be enabled.
 */
static inline void cmf_activate(void *area, unsigned int onoff)
{
	/* schm takes its operands in gprs 1 and 2 by architecture */
	register void *__gpr2 asm("2");
	register long __gpr1 asm("1");

	__gpr2 = area;
	__gpr1 = onoff;
	/* activate channel measurement */
	asm("schm" : : "d" (__gpr2), "d" (__gpr1));
}
2007-10-12 18:11:15 +04:00
/*
 * Program the subchannel's measurement configuration (enable bit and
 * block address/index) and commit it to the hardware.
 */
static int set_schib(struct ccw_device *cdev, u32 mme, int mbfc,
		     unsigned long address)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	int ret;

	sch->config.mme = mme;
	sch->config.mbfc = mbfc;
	/* address can be either a block address or a block index */
	if (mbfc)
		sch->config.mba = address;
	else
		sch->config.mbi = address;

	ret = cio_commit_config(sch);
	if (!mme && ret == -ENODEV) {
		/*
		 * The task was to disable measurement block updates but
		 * the subchannel is already gone. Report success.
		 */
		ret = 0;
	}
	return ret;
}
/* request state shared between set_schib_wait() and retry_set_schib() */
struct set_schib_struct {
	u32 mme;
	int mbfc;
	unsigned long address;
	wait_queue_head_t wait;
	int ret;
};

/* marker value: the retried request has not completed yet */
#define CMF_PENDING 1
#define SET_SCHIB_TIMEOUT (10 * HZ)
2006-06-29 17:08:41 +04:00
2005-04-17 02:20:36 +04:00
/*
 * Apply a measurement configuration change; if the subchannel is busy,
 * park the request on cmb_wait and let the interrupt handler retry it
 * via retry_set_schib(), waiting here for the outcome.
 */
static int set_schib_wait(struct ccw_device *cdev, u32 mme,
			  int mbfc, unsigned long address)
{
	struct set_schib_struct set_data;
	int ret = -ENODEV;

	spin_lock_irq(cdev->ccwlock);
	if (!cdev->private->cmb)
		goto out;

	ret = set_schib(cdev, mme, mbfc, address);
	if (ret != -EBUSY)
		goto out;

	/* if the device is not online, don't even try again */
	if (cdev->private->state != DEV_STATE_ONLINE)
		goto out;

	init_waitqueue_head(&set_data.wait);
	set_data.mme = mme;
	set_data.mbfc = mbfc;
	set_data.address = address;
	set_data.ret = CMF_PENDING;

	cdev->private->state = DEV_STATE_CMFCHANGE;
	cdev->private->cmb_wait = &set_data;
	spin_unlock_irq(cdev->ccwlock);

	ret = wait_event_interruptible_timeout(set_data.wait,
					       set_data.ret != CMF_PENDING,
					       SET_SCHIB_TIMEOUT);
	spin_lock_irq(cdev->ccwlock);
	if (ret <= 0) {
		if (set_data.ret == CMF_PENDING) {
			/* timed out (0) or interrupted (<0) before the retry ran */
			set_data.ret = (ret == 0) ? -ETIME : ret;
			if (cdev->private->state == DEV_STATE_CMFCHANGE)
				cdev->private->state = DEV_STATE_ONLINE;
		}
	}
	cdev->private->cmb_wait = NULL;
	ret = set_data.ret;
out:
	spin_unlock_irq(cdev->ccwlock);
	return ret;
}
void retry_set_schib ( struct ccw_device * cdev )
{
2017-09-06 14:43:20 +03:00
struct set_schib_struct * set_data = cdev - > private - > cmb_wait ;
2006-06-29 17:08:41 +04:00
2017-09-06 14:43:20 +03:00
if ( ! set_data )
2006-06-29 17:08:41 +04:00
return ;
2017-09-06 14:43:20 +03:00
2006-06-29 17:08:41 +04:00
set_data - > ret = set_schib ( cdev , set_data - > mme , set_data - > mbfc ,
set_data - > address ) ;
wake_up ( & set_data - > wait ) ;
}
/*
 * Snapshot the hardware measurement block into last_block and stamp
 * last_update. Returns -EBUSY while a start function is actively
 * running so the caller can retry once the subchannel is idle.
 */
static int cmf_copy_block(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct cmb_data *cmb_data;
	void *hw_block;

	if (cio_update_schib(sch))
		return -ENODEV;

	if (scsw_fctl(&sch->schib.scsw) & SCSW_FCTL_START_FUNC) {
		/* Don't copy if a start function is in progress. */
		if ((!(scsw_actl(&sch->schib.scsw) & SCSW_ACTL_SUSPENDED)) &&
		    (scsw_actl(&sch->schib.scsw) &
		     (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) &&
		    (!(scsw_stctl(&sch->schib.scsw) & SCSW_STCTL_SEC_STATUS)))
			return -EBUSY;
	}
	cmb_data = cdev->private->cmb;
	hw_block = cmb_data->hw_block;
	memcpy(cmb_data->last_block, hw_block, cmb_data->size);
	cmb_data->last_update = get_tod_clock();
	return 0;
}
/* state shared between cmf_cmb_copy_wait() and cmf_retry_copy_block() */
struct copy_block_struct {
	wait_queue_head_t wait;
	int ret;
};
/*
 * Copy the measurement block for @cdev; if the device is busy, park the
 * request on cmb_wait and wait for the interrupt handler to retry it
 * via cmf_retry_copy_block().
 */
static int cmf_cmb_copy_wait(struct ccw_device *cdev)
{
	struct copy_block_struct copy_block;
	int ret = -ENODEV;

	spin_lock_irq(cdev->ccwlock);
	if (!cdev->private->cmb)
		goto out;

	ret = cmf_copy_block(cdev);
	if (ret != -EBUSY)
		goto out;

	/* if the device is not online, don't even try again */
	if (cdev->private->state != DEV_STATE_ONLINE)
		goto out;

	init_waitqueue_head(&copy_block.wait);
	copy_block.ret = CMF_PENDING;

	cdev->private->state = DEV_STATE_CMFUPDATE;
	cdev->private->cmb_wait = &copy_block;
	spin_unlock_irq(cdev->ccwlock);

	ret = wait_event_interruptible(copy_block.wait,
				       copy_block.ret != CMF_PENDING);
	spin_lock_irq(cdev->ccwlock);
	if (ret) {
		if (copy_block.ret == CMF_PENDING) {
			/* interrupted by a signal before the retry completed */
			copy_block.ret = -ERESTARTSYS;
			if (cdev->private->state == DEV_STATE_CMFUPDATE)
				cdev->private->state = DEV_STATE_ONLINE;
		}
	}
	cdev->private->cmb_wait = NULL;
	ret = copy_block.ret;
out:
	spin_unlock_irq(cdev->ccwlock);
	return ret;
}
void cmf_retry_copy_block ( struct ccw_device * cdev )
{
2017-09-06 20:05:29 +03:00
struct copy_block_struct * copy_block = cdev - > private - > cmb_wait ;
2005-04-17 02:20:36 +04:00
2017-09-06 20:05:29 +03:00
if ( ! copy_block )
2005-04-17 02:20:36 +04:00
return ;
2017-09-06 20:05:29 +03:00
2006-06-29 17:08:41 +04:00
copy_block - > ret = cmf_copy_block ( cdev ) ;
wake_up ( & copy_block - > wait ) ;
}
static void cmf_generic_reset ( struct ccw_device * cdev )
{
struct cmb_data * cmb_data ;
spin_lock_irq ( cdev - > ccwlock ) ;
cmb_data = cdev - > private - > cmb ;
if ( cmb_data ) {
memset ( cmb_data - > last_block , 0 , cmb_data - > size ) ;
/*
* Need to reset hw block as well to make the hardware start
* from 0 again .
*/
2015-09-07 20:53:01 +03:00
memset ( cmb_data - > hw_block , 0 , cmb_data - > size ) ;
2006-06-29 17:08:41 +04:00
cmb_data - > last_update = 0 ;
}
2013-01-30 12:49:40 +04:00
cdev - > private - > cmb_start_time = get_tod_clock ( ) ;
2006-06-29 17:08:41 +04:00
spin_unlock_irq ( cdev - > ccwlock ) ;
2005-04-17 02:20:36 +04:00
}
/**
 * struct cmb_area - container for global cmb data
 *
 * @mem:	    pointer to CMBs (only in basic measurement mode)
 * @list:	    contains a linked list of all subchannels
 * @num_channels:   number of channels to be measured
 * @lock:	    protect concurrent access to @mem and @list
 */
struct cmb_area {
	struct cmb *mem;
	struct list_head list;
	int num_channels;
	spinlock_t lock;
};

static struct cmb_area cmb_area = {
	.lock = __SPIN_LOCK_UNLOCKED(cmb_area.lock),
	.list = LIST_HEAD_INIT(cmb_area.list),
	.num_channels = 1024,
};
/* ****** old style CMB handling ********/
2007-10-12 18:11:15 +04:00
/*
 * Basic channel measurement blocks are allocated in one contiguous
 * block of memory, which can not be moved as long as any channel
 * is active. Therefore, a maximum number of subchannels needs to
 * be defined somewhere. This is a module parameter, defaulting to
 * a reasonable value of 1024, or 32 kb of memory.
 * Current kernels don't allow kmalloc with more than 128kb, so the
 * maximum is 4096.
 */
module_param_named(maxchannels, cmb_area.num_channels, uint, 0444);

/**
 * struct cmb - basic channel measurement block
 * @ssch_rsch_count: number of ssch and rsch
 * @sample_count: number of samples
 * @device_connect_time: time of device connect
 * @function_pending_time: time of function pending
 * @device_disconnect_time: time of device disconnect
 * @control_unit_queuing_time: time of control unit queuing
 * @device_active_only_time: time of device active only
 * @reserved: unused in basic measurement mode
 *
 * The measurement block as used by the hardware. The fields are described
 * further in z/Architecture Principles of Operation, chapter 17.
 *
 * The cmb area made up from these blocks must be a contiguous array and may
 * not be reallocated or freed.
 * Only one cmb area can be present in the system.
 */
struct cmb {
	u16 ssch_rsch_count;
	u16 sample_count;
	u32 device_connect_time;
	u32 function_pending_time;
	u32 device_disconnect_time;
	u32 control_unit_queuing_time;
	u32 device_active_only_time;
	u32 reserved[2];
};
2007-10-12 18:11:15 +04:00
/*
 * Insert a single device into the cmb_area list.
 * Called with cmb_area.lock held from alloc_cmb.
 */
static int alloc_cmb_single(struct ccw_device *cdev,
			    struct cmb_data *cmb_data)
{
	struct cmb *cmb;
	struct ccw_device_private *node;
	int ret;

	spin_lock_irq(cdev->ccwlock);
	if (!list_empty(&cdev->private->cmb_list)) {
		/* device already has a cmb assigned */
		ret = -EBUSY;
		goto out;
	}

	/*
	 * Find first unused cmb in cmb_area.mem.
	 * This is a little tricky: cmb_area.list
	 * remains sorted by ->cmb->hw_data pointers.
	 */
	cmb = cmb_area.mem;
	list_for_each_entry(node, &cmb_area.list, cmb_list) {
		struct cmb_data *data;

		data = node->cmb;
		/* a gap before this node means slot @cmb is free */
		if ((struct cmb *)data->hw_block > cmb)
			break;
		cmb++;
	}
	if (cmb - cmb_area.mem >= cmb_area.num_channels) {
		ret = -ENOMEM;
		goto out;
	}

	/* insert new cmb */
	list_add_tail(&cdev->private->cmb_list, &node->cmb_list);
	cmb_data->hw_block = cmb;
	cdev->private->cmb = cmb_data;
	ret = 0;
out:
	spin_unlock_irq(cdev->ccwlock);
	return ret;
}
2007-10-12 18:11:15 +04:00
/*
 * Allocate a basic-format measurement block for @cdev, creating and
 * activating the global cmb area on first use.
 */
static int alloc_cmb(struct ccw_device *cdev)
{
	int ret;
	struct cmb *mem;
	ssize_t size;
	struct cmb_data *cmb_data;

	/* Allocate private cmb_data. */
	cmb_data = kzalloc(sizeof(struct cmb_data), GFP_KERNEL);
	if (!cmb_data)
		return -ENOMEM;

	cmb_data->last_block = kzalloc(sizeof(struct cmb), GFP_KERNEL);
	if (!cmb_data->last_block) {
		kfree(cmb_data);
		return -ENOMEM;
	}
	cmb_data->size = sizeof(struct cmb);

	spin_lock(&cmb_area.lock);
	if (!cmb_area.mem) {
		/* there is no user yet, so we need a new area */
		size = sizeof(struct cmb) * cmb_area.num_channels;
		WARN_ON(!list_empty(&cmb_area.list));

		/* drop the lock while allocating; re-check when it is retaken */
		spin_unlock(&cmb_area.lock);
		mem = (void *)__get_free_pages(GFP_KERNEL | GFP_DMA,
					       get_order(size));
		spin_lock(&cmb_area.lock);

		if (cmb_area.mem) {
			/* ok, another thread was faster */
			free_pages((unsigned long)mem, get_order(size));
		} else if (!mem) {
			/* no luck */
			ret = -ENOMEM;
			goto out;
		} else {
			/* everything ok */
			memset(mem, 0, size);
			cmb_area.mem = mem;
			cmf_activate(cmb_area.mem, CMF_ON);
		}
	}

	/* do the actual allocation */
	ret = alloc_cmb_single(cdev, cmb_data);
out:
	spin_unlock(&cmb_area.lock);
	if (ret) {
		kfree(cmb_data->last_block);
		kfree(cmb_data);
	}
	return ret;
}
2006-06-29 17:08:41 +04:00
/*
 * Release the basic-format block of @cdev and tear down the global
 * cmb area when the last measured channel goes away.
 */
static void free_cmb(struct ccw_device *cdev)
{
	struct ccw_device_private *priv;
	struct cmb_data *cmb_data;

	spin_lock(&cmb_area.lock);
	spin_lock_irq(cdev->ccwlock);

	priv = cdev->private;
	cmb_data = priv->cmb;
	priv->cmb = NULL;
	if (cmb_data)
		kfree(cmb_data->last_block);
	kfree(cmb_data);
	list_del_init(&priv->cmb_list);

	/* deactivate global measurement if this was the last channel */
	if (list_empty(&cmb_area.list)) {
		ssize_t size;

		size = sizeof(struct cmb) * cmb_area.num_channels;
		cmf_activate(NULL, CMF_OFF);
		free_pages((unsigned long)cmb_area.mem, get_order(size));
		cmb_area.mem = NULL;
	}
	spin_unlock_irq(cdev->ccwlock);
	spin_unlock(&cmb_area.lock);
}
2006-06-29 17:08:41 +04:00
/*
 * Enable or disable basic-format measurement for @cdev. The hardware is
 * given the block's *index* into the global cmb area (mbfc == 0).
 */
static int set_cmb(struct ccw_device *cdev, u32 mme)
{
	struct cmb_data *data;
	unsigned long flags;
	u16 offset;

	spin_lock_irqsave(cdev->ccwlock, flags);
	data = cdev->private->cmb;
	if (!data) {
		spin_unlock_irqrestore(cdev->ccwlock, flags);
		return -EINVAL;
	}
	offset = mme ? (struct cmb *)data->hw_block - cmb_area.mem : 0;
	spin_unlock_irqrestore(cdev->ccwlock, flags);

	return set_schib_wait(cdev, mme, 0, offset);
}
2017-09-08 22:01:38 +03:00
/* calculate utilization in 0.1 percent units */
static u64 __cmb_utilization(u64 device_connect_time, u64 function_pending_time,
			     u64 device_disconnect_time, u64 start_time)
{
	u64 busy, elapsed;

	busy = time_to_nsec(device_connect_time +
			    function_pending_time +
			    device_disconnect_time);

	elapsed = tod_to_ns(get_tod_clock() - start_time) / 1000;
	if (!elapsed)
		return 0;

	return busy / elapsed;
}
2007-10-12 18:11:15 +04:00
/*
 * Read one counter from the basic-format hardware block; returns 0 for
 * unknown indices or when measurement is not set up.
 */
static u64 read_cmb(struct ccw_device *cdev, int index)
{
	struct cmb_data *cmb_data;
	unsigned long flags;
	struct cmb *cmb;
	u64 ret = 0;
	u32 val;

	spin_lock_irqsave(cdev->ccwlock, flags);
	cmb_data = cdev->private->cmb;
	if (!cmb_data)
		goto out;

	cmb = cmb_data->hw_block;
	switch (index) {
	case avg_utilization:
		ret = __cmb_utilization(cmb->device_connect_time,
					cmb->function_pending_time,
					cmb->device_disconnect_time,
					cdev->private->cmb_start_time);
		goto out;
	case cmb_ssch_rsch_count:
		ret = cmb->ssch_rsch_count;
		goto out;
	case cmb_sample_count:
		ret = cmb->sample_count;
		goto out;
	case cmb_device_connect_time:
		val = cmb->device_connect_time;
		break;
	case cmb_function_pending_time:
		val = cmb->function_pending_time;
		break;
	case cmb_device_disconnect_time:
		val = cmb->device_disconnect_time;
		break;
	case cmb_control_unit_queuing_time:
		val = cmb->control_unit_queuing_time;
		break;
	case cmb_device_active_only_time:
		val = cmb->device_active_only_time;
		break;
	default:
		goto out;
	}
	/* time counters are reported as per-sample averages in nsec */
	ret = time_to_avg_nsec(val, cmb->sample_count);
out:
	spin_unlock_irqrestore(cdev->ccwlock, flags);
	return ret;
}
2007-10-12 18:11:15 +04:00
/* read the whole basic-format block into the common struct cmbdata */
static int readall_cmb(struct ccw_device *cdev, struct cmbdata *data)
{
	struct cmb *cmb;
	struct cmb_data *cmb_data;
	u64 time;
	unsigned long flags;
	int ret;

	/* make sure last_block holds an up-to-date snapshot */
	ret = cmf_cmb_copy_wait(cdev);
	if (ret < 0)
		return ret;

	spin_lock_irqsave(cdev->ccwlock, flags);
	cmb_data = cdev->private->cmb;
	if (!cmb_data) {
		ret = -ENODEV;
		goto out;
	}
	if (cmb_data->last_update == 0) {
		/* nothing captured since measurement was (re)started */
		ret = -EAGAIN;
		goto out;
	}
	cmb = cmb_data->last_block;
	time = cmb_data->last_update - cdev->private->cmb_start_time;

	memset(data, 0, sizeof(struct cmbdata));

	/* we only know values before device_busy_time */
	data->size = offsetof(struct cmbdata, device_busy_time);

	data->elapsed_time = tod_to_ns(time);

	/* copy data to new structure */
	data->ssch_rsch_count = cmb->ssch_rsch_count;
	data->sample_count = cmb->sample_count;

	/* time fields are converted to nanoseconds while copying */
	data->device_connect_time = time_to_nsec(cmb->device_connect_time);
	data->function_pending_time = time_to_nsec(cmb->function_pending_time);
	data->device_disconnect_time =
		time_to_nsec(cmb->device_disconnect_time);
	data->control_unit_queuing_time
		= time_to_nsec(cmb->control_unit_queuing_time);
	data->device_active_only_time
		= time_to_nsec(cmb->device_active_only_time);
	ret = 0;
out:
	spin_unlock_irqrestore(cdev->ccwlock, flags);
	return ret;
}
2005-04-17 02:20:36 +04:00
2006-06-29 17:08:41 +04:00
/* reset the basic-format measurement data for @cdev */
static void reset_cmb(struct ccw_device *cdev)
{
	cmf_generic_reset(cdev);
}
2016-07-12 20:57:57 +03:00
static int cmf_enabled ( struct ccw_device * cdev )
{
int enabled ;
spin_lock_irq ( cdev - > ccwlock ) ;
enabled = ! ! cdev - > private - > cmb ;
spin_unlock_irq ( cdev - > ccwlock ) ;
return enabled ;
}
2005-04-17 02:20:36 +04:00
static struct attribute_group cmf_attr_group;

/* operations for the traditional basic measurement block format */
static struct cmb_operations cmbops_basic = {
	.alloc	= alloc_cmb,
	.free	= free_cmb,
	.set	= set_cmb,
	.read	= read_cmb,
	.readall    = readall_cmb,
	.reset	    = reset_cmb,
	.attr_group = &cmf_attr_group,
};
2007-10-12 18:11:35 +04:00
2005-04-17 02:20:36 +04:00
/* ******** extended cmb handling ********/

/**
 * struct cmbe - extended channel measurement block
 * @ssch_rsch_count: number of ssch and rsch
 * @sample_count: number of samples
 * @device_connect_time: time of device connect
 * @function_pending_time: time of function pending
 * @device_disconnect_time: time of device disconnect
 * @control_unit_queuing_time: time of control unit queuing
 * @device_active_only_time: time of device active only
 * @device_busy_time: time of device busy
 * @initial_command_response_time: initial command response time
 * @reserved: unused
 *
 * The measurement block as used by the hardware. May be in any 64 bit physical
 * location.
 * The fields are described further in z/Architecture Principles of Operation,
 * third edition, chapter 17.
 */
struct cmbe {
	u32 ssch_rsch_count;
	u32 sample_count;
	u32 device_connect_time;
	u32 function_pending_time;
	u32 device_disconnect_time;
	u32 control_unit_queuing_time;
	u32 device_active_only_time;
	u32 device_busy_time;
	u32 initial_command_response_time;
	u32 reserved[7];
} __packed __aligned(64);

/* slab cache providing the 64-byte alignment required for cmbe blocks */
static struct kmem_cache *cmbe_cache;
2005-04-17 02:20:36 +04:00
2007-10-12 18:11:15 +04:00
/*
 * Allocate an extended-format measurement block for @cdev and activate
 * global measurement if this is the first measured channel.
 */
static int alloc_cmbe(struct ccw_device *cdev)
{
	struct cmb_data *cmb_data;
	struct cmbe *cmbe;
	int ret = -ENOMEM;

	cmbe = kmem_cache_zalloc(cmbe_cache, GFP_KERNEL);
	if (!cmbe)
		return ret;

	cmb_data = kzalloc(sizeof(*cmb_data), GFP_KERNEL);
	if (!cmb_data)
		goto out_free;

	cmb_data->last_block = kzalloc(sizeof(struct cmbe), GFP_KERNEL);
	if (!cmb_data->last_block)
		goto out_free;

	cmb_data->size = sizeof(*cmbe);
	cmb_data->hw_block = cmbe;

	spin_lock(&cmb_area.lock);
	spin_lock_irq(cdev->ccwlock);
	if (cdev->private->cmb)
		/* someone beat us to it; keep the existing block */
		goto out_unlock;

	cdev->private->cmb = cmb_data;

	/* activate global measurement if this is the first channel */
	if (list_empty(&cmb_area.list))
		cmf_activate(NULL, CMF_ON);
	list_add_tail(&cdev->private->cmb_list, &cmb_area.list);

	spin_unlock_irq(cdev->ccwlock);
	spin_unlock(&cmb_area.lock);
	return 0;

out_unlock:
	spin_unlock_irq(cdev->ccwlock);
	spin_unlock(&cmb_area.lock);
	ret = -EBUSY;
out_free:
	if (cmb_data)
		kfree(cmb_data->last_block);
	kfree(cmb_data);
	kmem_cache_free(cmbe_cache, cmbe);

	return ret;
}
2007-10-12 18:11:15 +04:00
/*
 * Release the extended-format block of @cdev and deactivate global
 * measurement if this was the last measured channel.
 */
static void free_cmbe(struct ccw_device *cdev)
{
	struct cmb_data *cmb_data;

	spin_lock(&cmb_area.lock);
	spin_lock_irq(cdev->ccwlock);
	cmb_data = cdev->private->cmb;
	cdev->private->cmb = NULL;
	if (cmb_data) {
		kfree(cmb_data->last_block);
		kmem_cache_free(cmbe_cache, cmb_data->hw_block);
	}
	kfree(cmb_data);

	/* deactivate global measurement if this is the last channel */
	list_del_init(&cdev->private->cmb_list);
	if (list_empty(&cmb_area.list))
		cmf_activate(NULL, CMF_OFF);
	spin_unlock_irq(cdev->ccwlock);
	spin_unlock(&cmb_area.lock);
}
2006-06-29 17:08:41 +04:00
/*
 * Enable or disable extended-format measurement for @cdev. The hardware
 * is given the block's *address* (mbfc == 1), unlike basic format.
 */
static int set_cmbe(struct ccw_device *cdev, u32 mme)
{
	struct cmb_data *data;
	unsigned long flags;
	unsigned long mba;

	spin_lock_irqsave(cdev->ccwlock, flags);
	data = cdev->private->cmb;
	if (!data) {
		spin_unlock_irqrestore(cdev->ccwlock, flags);
		return -EINVAL;
	}
	mba = mme ? (unsigned long)data->hw_block : 0;
	spin_unlock_irqrestore(cdev->ccwlock, flags);

	return set_schib_wait(cdev, mme, 1, mba);
}
2007-10-12 18:11:15 +04:00
static u64 read_cmbe ( struct ccw_device * cdev , int index )
2005-04-17 02:20:36 +04:00
{
2006-06-29 17:08:41 +04:00
struct cmb_data * cmb_data ;
unsigned long flags ;
2017-09-07 14:18:40 +03:00
struct cmbe * cmb ;
2017-09-08 22:01:38 +03:00
u64 ret = 0 ;
2017-09-07 14:18:40 +03:00
u32 val ;
2005-04-17 02:20:36 +04:00
2006-06-29 17:08:41 +04:00
spin_lock_irqsave ( cdev - > ccwlock , flags ) ;
cmb_data = cdev - > private - > cmb ;
2017-09-07 14:18:40 +03:00
if ( ! cmb_data )
2006-06-29 17:08:41 +04:00
goto out ;
2005-04-17 02:20:36 +04:00
2017-09-07 14:18:40 +03:00
cmb = cmb_data - > hw_block ;
2005-04-17 02:20:36 +04:00
switch ( index ) {
2017-09-08 22:01:38 +03:00
case avg_utilization :
ret = __cmb_utilization ( cmb - > device_connect_time ,
cmb - > function_pending_time ,
cmb - > device_disconnect_time ,
cdev - > private - > cmb_start_time ) ;
goto out ;
2005-04-17 02:20:36 +04:00
case cmb_ssch_rsch_count :
2006-06-29 17:08:41 +04:00
ret = cmb - > ssch_rsch_count ;
goto out ;
2005-04-17 02:20:36 +04:00
case cmb_sample_count :
2006-06-29 17:08:41 +04:00
ret = cmb - > sample_count ;
goto out ;
2005-04-17 02:20:36 +04:00
case cmb_device_connect_time :
2006-06-29 17:08:41 +04:00
val = cmb - > device_connect_time ;
2005-04-17 02:20:36 +04:00
break ;
case cmb_function_pending_time :
2006-06-29 17:08:41 +04:00
val = cmb - > function_pending_time ;
2005-04-17 02:20:36 +04:00
break ;
case cmb_device_disconnect_time :
2006-06-29 17:08:41 +04:00
val = cmb - > device_disconnect_time ;
2005-04-17 02:20:36 +04:00
break ;
case cmb_control_unit_queuing_time :
2006-06-29 17:08:41 +04:00
val = cmb - > control_unit_queuing_time ;
2005-04-17 02:20:36 +04:00
break ;
case cmb_device_active_only_time :
2006-06-29 17:08:41 +04:00
val = cmb - > device_active_only_time ;
2005-04-17 02:20:36 +04:00
break ;
case cmb_device_busy_time :
2006-06-29 17:08:41 +04:00
val = cmb - > device_busy_time ;
2005-04-17 02:20:36 +04:00
break ;
case cmb_initial_command_response_time :
2006-06-29 17:08:41 +04:00
val = cmb - > initial_command_response_time ;
2005-04-17 02:20:36 +04:00
break ;
default :
2006-06-29 17:08:41 +04:00
goto out ;
2005-04-17 02:20:36 +04:00
}
2006-06-29 17:08:41 +04:00
ret = time_to_avg_nsec ( val , cmb - > sample_count ) ;
out :
spin_unlock_irqrestore ( cdev - > ccwlock , flags ) ;
return ret ;
2005-04-17 02:20:36 +04:00
}
2007-10-12 18:11:15 +04:00
static int readall_cmbe ( struct ccw_device * cdev , struct cmbdata * data )
2005-04-17 02:20:36 +04:00
{
2006-06-29 17:08:41 +04:00
struct cmbe * cmb ;
struct cmb_data * cmb_data ;
2005-04-17 02:20:36 +04:00
u64 time ;
2006-06-29 17:08:41 +04:00
unsigned long flags ;
int ret ;
2005-04-17 02:20:36 +04:00
2006-06-29 17:08:41 +04:00
ret = cmf_cmb_copy_wait ( cdev ) ;
if ( ret < 0 )
return ret ;
2005-04-17 02:20:36 +04:00
spin_lock_irqsave ( cdev - > ccwlock , flags ) ;
2006-06-29 17:08:41 +04:00
cmb_data = cdev - > private - > cmb ;
if ( ! cmb_data ) {
ret = - ENODEV ;
goto out ;
2005-04-17 02:20:36 +04:00
}
2006-06-29 17:08:41 +04:00
if ( cmb_data - > last_update = = 0 ) {
ret = - EAGAIN ;
goto out ;
}
time = cmb_data - > last_update - cdev - > private - > cmb_start_time ;
2005-04-17 02:20:36 +04:00
memset ( data , 0 , sizeof ( struct cmbdata ) ) ;
/* we only know values before device_busy_time */
data - > size = offsetof ( struct cmbdata , device_busy_time ) ;
2017-09-12 12:21:00 +03:00
data - > elapsed_time = tod_to_ns ( time ) ;
2005-04-17 02:20:36 +04:00
2006-06-29 17:08:41 +04:00
cmb = cmb_data - > last_block ;
2005-04-17 02:20:36 +04:00
/* copy data to new structure */
2006-06-29 17:08:41 +04:00
data - > ssch_rsch_count = cmb - > ssch_rsch_count ;
data - > sample_count = cmb - > sample_count ;
2005-04-17 02:20:36 +04:00
/* time fields are converted to nanoseconds while copying */
2006-06-29 17:08:41 +04:00
data - > device_connect_time = time_to_nsec ( cmb - > device_connect_time ) ;
data - > function_pending_time = time_to_nsec ( cmb - > function_pending_time ) ;
data - > device_disconnect_time =
time_to_nsec ( cmb - > device_disconnect_time ) ;
2005-04-17 02:20:36 +04:00
data - > control_unit_queuing_time
2006-06-29 17:08:41 +04:00
= time_to_nsec ( cmb - > control_unit_queuing_time ) ;
2005-04-17 02:20:36 +04:00
data - > device_active_only_time
2006-06-29 17:08:41 +04:00
= time_to_nsec ( cmb - > device_active_only_time ) ;
data - > device_busy_time = time_to_nsec ( cmb - > device_busy_time ) ;
2005-04-17 02:20:36 +04:00
data - > initial_command_response_time
2006-06-29 17:08:41 +04:00
= time_to_nsec ( cmb - > initial_command_response_time ) ;
2005-04-17 02:20:36 +04:00
2006-06-29 17:08:41 +04:00
ret = 0 ;
out :
spin_unlock_irqrestore ( cdev - > ccwlock , flags ) ;
return ret ;
2005-04-17 02:20:36 +04:00
}
2006-06-29 17:08:41 +04:00
/* Reset the extended-format measurement data of @cdev to a clean state. */
static void reset_cmbe(struct ccw_device *cdev)
{
	cmf_generic_reset(cdev);
}
2005-04-17 02:20:36 +04:00
static struct attribute_group cmf_attr_group_ext;

/* operations for the extended-format channel measurement block */
static struct cmb_operations cmbops_extended = {
	.alloc	    = alloc_cmbe,
	.free	    = free_cmbe,
	.set	    = set_cmbe,
	.read	    = read_cmbe,
	.readall    = readall_cmbe,
	.reset	    = reset_cmbe,
	.attr_group = &cmf_attr_group_ext,
};
2007-10-12 18:11:15 +04:00
/* Common sysfs show helper: print one measurement value as decimal. */
static ssize_t cmb_show_attr(struct device *dev, char *buf, enum cmb_index idx)
{
	unsigned long long value = cmf_read(to_ccwdev(dev), idx);

	return sprintf(buf, "%lld\n", value);
}
2007-10-12 18:11:15 +04:00
static ssize_t cmb_show_avg_sample_interval ( struct device * dev ,
struct device_attribute * attr ,
char * buf )
2005-04-17 02:20:36 +04:00
{
2017-09-07 14:18:40 +03:00
struct ccw_device * cdev = to_ccwdev ( dev ) ;
2005-04-17 02:20:36 +04:00
unsigned long count ;
2017-09-07 14:18:40 +03:00
long interval ;
2005-04-17 02:20:36 +04:00
count = cmf_read ( cdev , cmb_sample_count ) ;
2006-06-29 17:08:41 +04:00
spin_lock_irq ( cdev - > ccwlock ) ;
if ( count ) {
2017-09-07 14:18:40 +03:00
interval = get_tod_clock ( ) - cdev - > private - > cmb_start_time ;
2017-09-12 12:21:00 +03:00
interval = tod_to_ns ( interval ) ;
2005-04-17 02:20:36 +04:00
interval / = count ;
2006-06-29 17:08:41 +04:00
} else
2005-04-17 02:20:36 +04:00
interval = - 1 ;
2006-06-29 17:08:41 +04:00
spin_unlock_irq ( cdev - > ccwlock ) ;
2005-04-17 02:20:36 +04:00
return sprintf ( buf , " %ld \n " , interval ) ;
}
2007-10-12 18:11:15 +04:00
static ssize_t cmb_show_avg_utilization ( struct device * dev ,
struct device_attribute * attr ,
char * buf )
2005-04-17 02:20:36 +04:00
{
2017-09-08 22:01:38 +03:00
unsigned long u = cmf_read ( to_ccwdev ( dev ) , avg_utilization ) ;
2005-04-17 02:20:36 +04:00
2017-09-08 22:01:38 +03:00
return sprintf ( buf , " %02lu.%01lu%% \n " , u / 10 , u % 10 ) ;
2005-04-17 02:20:36 +04:00
}
/*
 * Generate read-only sysfs show functions and device attributes for the
 * individual measurement values: cmf_attr() for raw counters,
 * cmf_attr_avg() for per-sample average times (prefixed "avg_").
 */
#define cmf_attr(name) \
static ssize_t show_##name(struct device *dev, \
			   struct device_attribute *attr, char *buf) \
{ return cmb_show_attr((dev), buf, cmb_##name); } \
static DEVICE_ATTR(name, 0444, show_##name, NULL);

#define cmf_attr_avg(name) \
static ssize_t show_avg_##name(struct device *dev, \
			       struct device_attribute *attr, char *buf) \
{ return cmb_show_attr((dev), buf, cmb_##name); } \
static DEVICE_ATTR(avg_##name, 0444, show_avg_##name, NULL);

cmf_attr(ssch_rsch_count);
cmf_attr(sample_count);
cmf_attr_avg(device_connect_time);
cmf_attr_avg(function_pending_time);
cmf_attr_avg(device_disconnect_time);
cmf_attr_avg(control_unit_queuing_time);
cmf_attr_avg(device_active_only_time);
cmf_attr_avg(device_busy_time);
cmf_attr_avg(initial_command_response_time);

static DEVICE_ATTR(avg_sample_interval, 0444, cmb_show_avg_sample_interval,
		   NULL);
static DEVICE_ATTR(avg_utilization, 0444, cmb_show_avg_utilization, NULL);
/* attributes available with the basic measurement block format */
static struct attribute *cmf_attributes[] = {
	&dev_attr_avg_sample_interval.attr,
	&dev_attr_avg_utilization.attr,
	&dev_attr_ssch_rsch_count.attr,
	&dev_attr_sample_count.attr,
	&dev_attr_avg_device_connect_time.attr,
	&dev_attr_avg_function_pending_time.attr,
	&dev_attr_avg_device_disconnect_time.attr,
	&dev_attr_avg_control_unit_queuing_time.attr,
	&dev_attr_avg_device_active_only_time.attr,
	NULL,
};

static struct attribute_group cmf_attr_group = {
	.name  = "cmf",
	.attrs = cmf_attributes,
};
/*
 * attributes available with the extended format; adds device_busy_time
 * and initial_command_response_time to the basic set
 */
static struct attribute *cmf_attributes_ext[] = {
	&dev_attr_avg_sample_interval.attr,
	&dev_attr_avg_utilization.attr,
	&dev_attr_ssch_rsch_count.attr,
	&dev_attr_sample_count.attr,
	&dev_attr_avg_device_connect_time.attr,
	&dev_attr_avg_function_pending_time.attr,
	&dev_attr_avg_device_disconnect_time.attr,
	&dev_attr_avg_control_unit_queuing_time.attr,
	&dev_attr_avg_device_active_only_time.attr,
	&dev_attr_avg_device_busy_time.attr,
	&dev_attr_avg_initial_command_response_time.attr,
	NULL,
};

static struct attribute_group cmf_attr_group_ext = {
	.name  = "cmf",
	.attrs = cmf_attributes_ext,
};
2007-10-12 18:11:15 +04:00
/* Show whether channel measurement is currently enabled for the device. */
static ssize_t cmb_enable_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	return sprintf(buf, "%d\n", cmf_enabled(to_ccwdev(dev)));
}
2007-10-12 18:11:15 +04:00
/*
 * Enable ("1") or disable ("0") channel measurement via sysfs; any other
 * value yields -EINVAL.
 */
static ssize_t cmb_enable_store(struct device *dev,
				struct device_attribute *attr, const char *buf,
				size_t c)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 16, &val);
	if (rc)
		return rc;

	if (val == 0)
		rc = disable_cmf(cdev);
	else if (val == 1)
		rc = enable_cmf(cdev);
	else
		rc = -EINVAL;

	return rc ? rc : c;
}
DEVICE_ATTR_RW(cmb_enable);
2005-04-17 02:20:36 +04:00
2009-06-16 12:30:20 +04:00
/*
 * Turn measurement block updates on (mme = 2) or off (mme = 0) without
 * touching the sysfs attributes or the block allocation.
 */
int ccw_set_cmf(struct ccw_device *cdev, int enable)
{
	return cmbops->set(cdev, enable ? 2 : 0);
}
2007-10-12 18:11:16 +04:00
/**
 * enable_cmf() - switch on the channel measurement for a specific device
 * @cdev:	The ccw device to be enabled
 *
 * Enable channel measurements for @cdev. If this is called on a device
 * for which channel measurement is already enabled a reset of the
 * measurement data is triggered.
 * Returns: %0 for success or a negative error value.
 * Context:
 *   non-atomic
 */
int enable_cmf(struct ccw_device *cdev)
{
	int ret = 0;

	device_lock(&cdev->dev);
	if (cmf_enabled(cdev)) {
		/* already enabled: just restart the measurement */
		cmbops->reset(cdev);
		goto out_unlock;
	}
	/* hold a device reference for the lifetime of the measurement */
	get_device(&cdev->dev);
	ret = cmbops->alloc(cdev);
	if (ret)
		goto out;
	cmbops->reset(cdev);
	ret = sysfs_create_group(&cdev->dev.kobj, cmbops->attr_group);
	if (ret) {
		cmbops->free(cdev);
		goto out;
	}
	/* mme = 2: enable measurement block updates */
	ret = cmbops->set(cdev, 2);
	if (ret) {
		/* undo sysfs group and allocation on failure */
		sysfs_remove_group(&cdev->dev.kobj, cmbops->attr_group);
		cmbops->free(cdev);
	}
out:
	if (ret)
		put_device(&cdev->dev);
out_unlock:
	device_unlock(&cdev->dev);
	return ret;
}
2007-10-12 18:11:16 +04:00
/**
 * __disable_cmf() - switch off the channel measurement for a specific device
 * @cdev:	The ccw device to be disabled
 *
 * Returns: %0 for success or a negative error value.
 *
 * Context:
 *   non-atomic, device_lock() held.
 */
int __disable_cmf(struct ccw_device *cdev)
{
	int ret;

	/* stop measurement block updates before tearing down state */
	ret = cmbops->set(cdev, 0);
	if (ret)
		return ret;

	sysfs_remove_group(&cdev->dev.kobj, cmbops->attr_group);
	cmbops->free(cdev);
	/* drop the reference taken in enable_cmf() */
	put_device(&cdev->dev);

	return ret;
}
/**
 * disable_cmf() - switch off the channel measurement for a specific device
 * @cdev:	The ccw device to be disabled
 *
 * Returns: %0 for success or a negative error value.
 *
 * Context:
 *   non-atomic
 */
int disable_cmf(struct ccw_device *cdev)
{
	int ret;

	/* take the device lock required by __disable_cmf() */
	device_lock(&cdev->dev);
	ret = __disable_cmf(cdev);
	device_unlock(&cdev->dev);

	return ret;
}
2007-10-12 18:11:16 +04:00
/**
 * cmf_read() - read one value from the current channel measurement block
 * @cdev:	the channel to be read
 * @index:	the index of the value to be read
 *
 * Returns: The value read or %0 if the value cannot be read.
 *
 * Context:
 *   any
 */
u64 cmf_read(struct ccw_device *cdev, int index)
{
	/* dispatch to the basic or extended format implementation */
	return cmbops->read(cdev, index);
}
2007-10-12 18:11:16 +04:00
/**
 * cmf_readall() - read the current channel measurement block
 * @cdev:	the channel to be read
 * @data:	a pointer to a data block that will be filled
 *
 * Returns: %0 on success, a negative error value otherwise.
 *
 * Context:
 *   any
 */
int cmf_readall(struct ccw_device *cdev, struct cmbdata *data)
{
	/* dispatch to the basic or extended format implementation */
	return cmbops->readall(cdev, data);
}
2006-06-29 17:08:41 +04:00
/* Reenable cmf when a disconnected device becomes available again. */
int cmf_reenable(struct ccw_device *cdev)
{
	/* start over with fresh data, then turn block updates back on */
	cmbops->reset(cdev);
	return cmbops->set(cdev, 2);
}
2015-09-09 11:29:59 +03:00
/**
 * cmf_reactivate() - reactivate measurement block updates
 *
 * Use this during resume from hibernate.
 */
void cmf_reactivate(void)
{
	spin_lock(&cmb_area.lock);
	/* only reactivate if at least one channel is still measured */
	if (!list_empty(&cmb_area.list))
		cmf_activate(cmb_area.mem, CMF_ON);
	spin_unlock(&cmb_area.lock);
}
2015-09-07 20:53:01 +03:00
/* Create the slab cache used for extended-format measurement blocks. */
static int __init init_cmbe(void)
{
	cmbe_cache = kmem_cache_create("cmbe_cache", sizeof(struct cmbe),
				       __alignof__(struct cmbe), 0, NULL);
	return cmbe_cache ? 0 : -ENOMEM;
}
2007-10-12 18:11:15 +04:00
/*
 * Initialize the channel measurement facility, selecting the basic or
 * extended block format and installing the matching operations.
 */
static int __init init_cmf(void)
{
	char *format_string;
	char *detect_string;
	int ret;

	/*
	 * If the user did not give a parameter, see if we are running on a
	 * machine supporting extended measurement blocks, otherwise fall back
	 * to basic mode.
	 */
	if (format == CMF_AUTODETECT) {
		format = css_general_characteristics.ext_mb ?
			CMF_EXTENDED : CMF_BASIC;
		detect_string = "autodetected";
	} else {
		detect_string = "parameter";
	}

	switch (format) {
	case CMF_BASIC:
		format_string = "basic";
		cmbops = &cmbops_basic;
		break;
	case CMF_EXTENDED:
		format_string = "extended";
		cmbops = &cmbops_extended;
		ret = init_cmbe();
		if (ret)
			return ret;
		break;
	default:
		return -EINVAL;
	}
	pr_info("Channel measurement facility initialized using format "
		"%s (mode %s)\n", format_string, detect_string);
	return 0;
}
2016-10-30 23:37:24 +03:00
device_initcall(init_cmf);

/* exported for use by ccw device drivers */
EXPORT_SYMBOL_GPL(enable_cmf);
EXPORT_SYMBOL_GPL(disable_cmf);
EXPORT_SYMBOL_GPL(cmf_read);
EXPORT_SYMBOL_GPL(cmf_readall);