// SPDX-License-Identifier: GPL-2.0
/*
 * driver for channel subsystem
 *
 * Copyright IBM Corp. 2002, 2010
 *
 * Author(s): Arnd Bergmann (arndb@de.ibm.com)
 *	      Cornelia Huck (cornelia.huck@de.ibm.com)
 */

#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/export.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/reboot.h>
#include <linux/proc_fs.h>
#include <linux/genalloc.h>
#include <linux/dma-mapping.h>
#include <asm/isc.h>
#include <asm/crw.h>

#include "css.h"
#include "cio.h"
#include "blacklist.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chsc.h"
#include "device.h"
#include "idset.h"
#include "chp.h"

int css_init_done = 0;
int max_ssid;

#define MAX_CSS_IDX 0
struct channel_subsystem *channel_subsystems[MAX_CSS_IDX + 1];
static struct bus_type css_bus_type;

int
for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
{
	struct subchannel_id schid;
	int ret;

	init_subchannel_id(&schid);
	do {
		do {
			ret = fn(schid, data);
			if (ret)
				break;
		} while (schid.sch_no++ < __MAX_SUBCHANNEL);
		schid.sch_no = 0;
	} while (schid.ssid++ < max_ssid);
	return ret;
}
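
/*
 * Usage sketch (illustrative only, not taken from this driver): the
 * callback is invoked once per subchannel ID, and a non-zero return value
 * stops the iteration and is handed back to the caller.
 *
 *	static int count_ids(struct subchannel_id schid, void *data)
 *	{
 *		(*(unsigned long *)data)++;
 *		return 0;	// 0 == keep iterating
 *	}
 *
 *	unsigned long n = 0;
 *	for_each_subchannel(count_ids, &n);
 */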

struct cb_data {
	void *data;
	struct idset *set;
	int (*fn_known_sch)(struct subchannel *, void *);
	int (*fn_unknown_sch)(struct subchannel_id, void *);
};

static int call_fn_known_sch(struct device *dev, void *data)
{
	struct subchannel *sch = to_subchannel(dev);
	struct cb_data *cb = data;
	int rc = 0;

	if (cb->set)
		idset_sch_del(cb->set, sch->schid);
	if (cb->fn_known_sch)
		rc = cb->fn_known_sch(sch, cb->data);
	return rc;
}

static int call_fn_unknown_sch(struct subchannel_id schid, void *data)
{
	struct cb_data *cb = data;
	int rc = 0;

	if (idset_sch_contains(cb->set, schid))
		rc = cb->fn_unknown_sch(schid, cb->data);
	return rc;
}

static int call_fn_all_sch(struct subchannel_id schid, void *data)
{
	struct cb_data *cb = data;
	struct subchannel *sch;
	int rc = 0;

	sch = get_subchannel_by_schid(schid);
	if (sch) {
		if (cb->fn_known_sch)
			rc = cb->fn_known_sch(sch, cb->data);
		put_device(&sch->dev);
	} else {
		if (cb->fn_unknown_sch)
			rc = cb->fn_unknown_sch(schid, cb->data);
	}
	return rc;
}

int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
			       int (*fn_unknown)(struct subchannel_id,
			       void *), void *data)
{
	struct cb_data cb;
	int rc;

	cb.data = data;
	cb.fn_known_sch = fn_known;
	cb.fn_unknown_sch = fn_unknown;

	if (fn_known && !fn_unknown) {
		/* Skip idset allocation in case of known-only loop. */
		cb.set = NULL;
		return bus_for_each_dev(&css_bus_type, NULL, &cb,
					call_fn_known_sch);
	}

	cb.set = idset_sch_new();
	if (!cb.set)
		/* fall back to brute force scanning in case of oom */
		return for_each_subchannel(call_fn_all_sch, &cb);

	idset_fill(cb.set);

	/* Process registered subchannels. */
	rc = bus_for_each_dev(&css_bus_type, NULL, &cb, call_fn_known_sch);
	if (rc)
		goto out;
	/* Process unregistered subchannels. */
	if (fn_unknown)
		rc = for_each_subchannel(call_fn_unknown_sch, &cb);
out:
	idset_free(cb.set);
	return rc;
}
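
/*
 * Usage sketch (illustrative): evaluate every subchannel, taking the cheap
 * driver-core walk for registered subchannels and the full ID scan only
 * for the unregistered rest:
 *
 *	for_each_subchannel_staged(eval_known, eval_unknown, NULL);
 *
 * css_slow_path_func() below drives the slow path exactly this way.
 */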

static void css_sch_todo(struct work_struct *work);

static int css_sch_create_locks(struct subchannel *sch)
{
	sch->lock = kmalloc(sizeof(*sch->lock), GFP_KERNEL);
	if (!sch->lock)
		return -ENOMEM;

	spin_lock_init(sch->lock);
	mutex_init(&sch->reg_mutex);

	return 0;
}

static void css_subchannel_release(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);

	sch->config.intparm = 0;
	cio_commit_config(sch);
	kfree(sch->driver_override);
	kfree(sch->lock);
	kfree(sch);
}

static int css_validate_subchannel(struct subchannel_id schid,
				   struct schib *schib)
{
	int err;

	switch (schib->pmcw.st) {
	case SUBCHANNEL_TYPE_IO:
	case SUBCHANNEL_TYPE_MSG:
		if (!css_sch_is_valid(schib))
			err = -ENODEV;
		else if (is_blacklisted(schid.ssid, schib->pmcw.dev)) {
			CIO_MSG_EVENT(6, "Blacklisted device detected "
				      "at devno %04X, subchannel set %x\n",
				      schib->pmcw.dev, schid.ssid);
			err = -ENODEV;
		} else
			err = 0;
		break;
	default:
		err = 0;
	}
	if (err)
		goto out;

	CIO_MSG_EVENT(4, "Subchannel 0.%x.%04x reports subchannel type %04X\n",
		      schid.ssid, schid.sch_no, schib->pmcw.st);
out:
	return err;
}

struct subchannel *css_alloc_subchannel(struct subchannel_id schid,
					struct schib *schib)
{
	struct subchannel *sch;
	int ret;

	ret = css_validate_subchannel(schid, schib);
	if (ret < 0)
		return ERR_PTR(ret);

	sch = kzalloc(sizeof(*sch), GFP_KERNEL | GFP_DMA);
	if (!sch)
		return ERR_PTR(-ENOMEM);

	sch->schid = schid;
	sch->schib = *schib;
	sch->st = schib->pmcw.st;

	ret = css_sch_create_locks(sch);
	if (ret)
		goto err;

	INIT_WORK(&sch->todo_work, css_sch_todo);
	sch->dev.release = &css_subchannel_release;
	device_initialize(&sch->dev);
	/*
	 * The physical addresses of some of the dma structures that can
	 * belong to a subchannel need to fit 31 bit width (e.g. ccw).
	 */
	sch->dev.coherent_dma_mask = DMA_BIT_MASK(31);
	/*
	 * But we don't have such restrictions imposed on the stuff that
	 * is handled by the streaming API.
	 */
	sch->dma_mask = DMA_BIT_MASK(64);
	sch->dev.dma_mask = &sch->dma_mask;
	return sch;

err:
	kfree(sch);
	return ERR_PTR(ret);
}

static int css_sch_device_register(struct subchannel *sch)
{
	int ret;

	mutex_lock(&sch->reg_mutex);
	dev_set_name(&sch->dev, "0.%x.%04x", sch->schid.ssid,
		     sch->schid.sch_no);
	ret = device_add(&sch->dev);
	mutex_unlock(&sch->reg_mutex);
	return ret;
}

/**
 * css_sch_device_unregister - unregister a subchannel
 * @sch: subchannel to be unregistered
 */
void css_sch_device_unregister(struct subchannel *sch)
{
	mutex_lock(&sch->reg_mutex);
	if (device_is_registered(&sch->dev))
		device_unregister(&sch->dev);
	mutex_unlock(&sch->reg_mutex);
}
EXPORT_SYMBOL_GPL(css_sch_device_unregister);

static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
{
	int i;
	int mask;

	memset(ssd, 0, sizeof(struct chsc_ssd_info));
	ssd->path_mask = pmcw->pim;
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (pmcw->pim & mask) {
			chp_id_init(&ssd->chpid[i]);
			ssd->chpid[i].id = pmcw->chpid[i];
		}
	}
}

static void ssd_register_chpids(struct chsc_ssd_info *ssd)
{
	int i;
	int mask;

	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (ssd->path_mask & mask)
			chp_new(ssd->chpid[i]);
	}
}

void css_update_ssd_info(struct subchannel *sch)
{
	int ret;

	ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
	if (ret)
		ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);

	ssd_register_chpids(&sch->ssd_info);
}
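
/*
 * Worked example (illustrative): with pmcw->pim == 0xc0, only bits 0 and 1
 * are set (mask == 0x80 >> i), so ssd->chpid[0] and ssd->chpid[1] are
 * initialized from pmcw->chpid[0] and pmcw->chpid[1], and path_mask
 * mirrors pim as 0xc0.
 */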

static ssize_t type_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sprintf(buf, "%01x\n", sch->st);
}

static DEVICE_ATTR_RO(type);

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sprintf(buf, "css:t%01X\n", sch->st);
}

static DEVICE_ATTR_RO(modalias);

static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct subchannel *sch = to_subchannel(dev);
	char *driver_override, *old, *cp;

	/* We need to keep extra room for a newline */
	if (count >= (PAGE_SIZE - 1))
		return -EINVAL;

	driver_override = kstrndup(buf, count, GFP_KERNEL);
	if (!driver_override)
		return -ENOMEM;

	cp = strchr(driver_override, '\n');
	if (cp)
		*cp = '\0';

	device_lock(dev);
	old = sch->driver_override;
	if (strlen(driver_override)) {
		sch->driver_override = driver_override;
	} else {
		kfree(driver_override);
		sch->driver_override = NULL;
	}
	device_unlock(dev);

	kfree(old);

	return count;
}

static ssize_t driver_override_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct subchannel *sch = to_subchannel(dev);
	ssize_t len;

	device_lock(dev);
	len = snprintf(buf, PAGE_SIZE, "%s\n", sch->driver_override);
	device_unlock(dev);
	return len;
}
static DEVICE_ATTR_RW(driver_override);

static struct attribute *subch_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_modalias.attr,
	&dev_attr_driver_override.attr,
	NULL,
};

static struct attribute_group subch_attr_group = {
	.attrs = subch_attrs,
};

static const struct attribute_group *default_subch_attr_groups[] = {
	&subch_attr_group,
	NULL,
};

static ssize_t chpids_show(struct device *dev,
			   struct device_attribute *attr,
			   char *buf)
{
	struct subchannel *sch = to_subchannel(dev);
	struct chsc_ssd_info *ssd = &sch->ssd_info;
	ssize_t ret = 0;
	int mask;
	int chp;

	for (chp = 0; chp < 8; chp++) {
		mask = 0x80 >> chp;
		if (ssd->path_mask & mask)
			ret += sprintf(buf + ret, "%02x ", ssd->chpid[chp].id);
		else
			ret += sprintf(buf + ret, "00 ");
	}
	ret += sprintf(buf + ret, "\n");
	return ret;
}
static DEVICE_ATTR_RO(chpids);

static ssize_t pimpampom_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct subchannel *sch = to_subchannel(dev);
	struct pmcw *pmcw = &sch->schib.pmcw;

	return sprintf(buf, "%02x %02x %02x\n",
		       pmcw->pim, pmcw->pam, pmcw->pom);
}
static DEVICE_ATTR_RO(pimpampom);

static struct attribute *io_subchannel_type_attrs[] = {
	&dev_attr_chpids.attr,
	&dev_attr_pimpampom.attr,
	NULL,
};
ATTRIBUTE_GROUPS(io_subchannel_type);

static const struct device_type io_subchannel_type = {
	.groups = io_subchannel_type_groups,
};

int css_register_subchannel(struct subchannel *sch)
{
	int ret;

	/* Initialize the subchannel structure */
	sch->dev.parent = &channel_subsystems[0]->device;
	sch->dev.bus = &css_bus_type;
	sch->dev.groups = default_subch_attr_groups;

	if (sch->st == SUBCHANNEL_TYPE_IO)
		sch->dev.type = &io_subchannel_type;

	/*
	 * We don't want to generate uevents for I/O subchannels that don't
	 * have a working ccw device behind them since they will be
	 * unregistered before they can be used anyway, so we delay the add
	 * uevent until after device recognition was successful.
	 * Note that we suppress the uevent for all subchannel types;
	 * the subchannel driver can decide itself when it wants to inform
	 * userspace of its existence.
	 */
	dev_set_uevent_suppress(&sch->dev, 1);
	css_update_ssd_info(sch);
	/* make it known to the system */
	ret = css_sch_device_register(sch);
	if (ret) {
		CIO_MSG_EVENT(0, "Could not register sch 0.%x.%04x: %d\n",
			      sch->schid.ssid, sch->schid.sch_no, ret);
		return ret;
	}
	if (!sch->driver) {
		/*
		 * No driver matched. Generate the uevent now so that
		 * a fitting driver module may be loaded based on the
		 * modalias.
		 */
		dev_set_uevent_suppress(&sch->dev, 0);
		kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
	}
	return ret;
}

static int css_probe_device(struct subchannel_id schid, struct schib *schib)
{
	struct subchannel *sch;
	int ret;

	sch = css_alloc_subchannel(schid, schib);
	if (IS_ERR(sch))
		return PTR_ERR(sch);

	ret = css_register_subchannel(sch);
	if (ret)
		put_device(&sch->dev);

	return ret;
}

static int
check_subchannel(struct device *dev, const void *data)
{
	struct subchannel *sch;
	struct subchannel_id *schid = (void *)data;

	sch = to_subchannel(dev);
	return schid_equal(&sch->schid, schid);
}

struct subchannel *
get_subchannel_by_schid(struct subchannel_id schid)
{
	struct device *dev;

	dev = bus_find_device(&css_bus_type, NULL,
			      &schid, check_subchannel);

	return dev ? to_subchannel(dev) : NULL;
}

/**
 * css_sch_is_valid() - check if a subchannel is valid
 * @schib: subchannel information block for the subchannel
 */
int css_sch_is_valid(struct schib *schib)
{
	if ((schib->pmcw.st == SUBCHANNEL_TYPE_IO) && !schib->pmcw.dnv)
		return 0;
	if ((schib->pmcw.st == SUBCHANNEL_TYPE_MSG) && !schib->pmcw.w)
		return 0;
	return 1;
}
EXPORT_SYMBOL_GPL(css_sch_is_valid);

static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
{
	struct schib schib;
	int ccode;

	if (!slow) {
		/* Will be done on the slow path. */
		return -EAGAIN;
	}
	/*
	 * The first subchannel that is not-operational (ccode == 3)
	 * indicates that there aren't any more devices available.
	 * If stsch gets an exception, it means the current subchannel set
	 * is not valid.
	 */
	ccode = stsch(schid, &schib);
	if (ccode)
		return (ccode == 3) ? -ENXIO : ccode;

	return css_probe_device(schid, &schib);
}

static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
{
	int ret = 0;

	if (sch->driver) {
		if (sch->driver->sch_event)
			ret = sch->driver->sch_event(sch, slow);
		else
			dev_dbg(&sch->dev,
				"Got subchannel machine check but "
				"no sch_event handler provided.\n");
	}
	if (ret != 0 && ret != -EAGAIN) {
		CIO_MSG_EVENT(2, "eval: sch 0.%x.%04x, rc=%d\n",
			      sch->schid.ssid, sch->schid.sch_no, ret);
	}
	return ret;
}

static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
{
	struct subchannel *sch;
	int ret;

	sch = get_subchannel_by_schid(schid);
	if (sch) {
		ret = css_evaluate_known_subchannel(sch, slow);
		put_device(&sch->dev);
	} else
		ret = css_evaluate_new_subchannel(schid, slow);
	if (ret == -EAGAIN)
		css_schedule_eval(schid);
}

/**
 * css_sched_sch_todo - schedule a subchannel operation
 * @sch: subchannel
 * @todo: todo
 *
 * Schedule the operation identified by @todo to be performed on the slow path
 * workqueue. Do nothing if another operation with higher priority is already
 * scheduled. Needs to be called with subchannel lock held.
 */
void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
{
	CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
		      sch->schid.ssid, sch->schid.sch_no, todo);
	if (sch->todo >= todo)
		return;
	/* Get workqueue ref. */
	if (!get_device(&sch->dev))
		return;
	sch->todo = todo;
	if (!queue_work(cio_work_q, &sch->todo_work)) {
		/* Already queued, release workqueue ref. */
		put_device(&sch->dev);
	}
}
EXPORT_SYMBOL_GPL(css_sched_sch_todo);

static void css_sch_todo(struct work_struct *work)
{
	struct subchannel *sch;
	enum sch_todo todo;
	int ret;

	sch = container_of(work, struct subchannel, todo_work);
	/* Find out todo. */
	spin_lock_irq(sch->lock);
	todo = sch->todo;
	CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
		      sch->schid.sch_no, todo);
	sch->todo = SCH_TODO_NOTHING;
	spin_unlock_irq(sch->lock);
	/* Perform todo. */
	switch (todo) {
	case SCH_TODO_NOTHING:
		break;
	case SCH_TODO_EVAL:
		ret = css_evaluate_known_subchannel(sch, 1);
		if (ret == -EAGAIN) {
			spin_lock_irq(sch->lock);
			css_sched_sch_todo(sch, todo);
			spin_unlock_irq(sch->lock);
		}
		break;
	case SCH_TODO_UNREG:
		css_sch_device_unregister(sch);
		break;
	}
	/* Release workqueue ref. */
	put_device(&sch->dev);
}
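
/*
 * Usage sketch (illustrative): as the kernel-doc above states, the
 * subchannel lock must be held when scheduling a todo, as css_sch_todo()
 * itself does for the -EAGAIN case:
 *
 *	spin_lock_irq(sch->lock);
 *	css_sched_sch_todo(sch, SCH_TODO_EVAL);
 *	spin_unlock_irq(sch->lock);
 */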

static struct idset *slow_subchannel_set;
static spinlock_t slow_subchannel_lock;
static wait_queue_head_t css_eval_wq;
static atomic_t css_eval_scheduled;

static int __init slow_subchannel_init(void)
{
	spin_lock_init(&slow_subchannel_lock);
	atomic_set(&css_eval_scheduled, 0);
	init_waitqueue_head(&css_eval_wq);
	slow_subchannel_set = idset_sch_new();
	if (!slow_subchannel_set) {
		CIO_MSG_EVENT(0, "could not allocate slow subchannel set\n");
		return -ENOMEM;
	}
	return 0;
}

static int slow_eval_known_fn(struct subchannel *sch, void *data)
{
	int eval;
	int rc;

	spin_lock_irq(&slow_subchannel_lock);
	eval = idset_sch_contains(slow_subchannel_set, sch->schid);
	idset_sch_del(slow_subchannel_set, sch->schid);
	spin_unlock_irq(&slow_subchannel_lock);
	if (eval) {
		rc = css_evaluate_known_subchannel(sch, 1);
		if (rc == -EAGAIN)
			css_schedule_eval(sch->schid);
		/*
		 * The loop might take a long time for platforms with lots
		 * of known devices. Allow scheduling here.
		 */
		cond_resched();
	}
	return 0;
}

static int slow_eval_unknown_fn(struct subchannel_id schid, void *data)
{
	int eval;
	int rc = 0;

	spin_lock_irq(&slow_subchannel_lock);
	eval = idset_sch_contains(slow_subchannel_set, schid);
	idset_sch_del(slow_subchannel_set, schid);
	spin_unlock_irq(&slow_subchannel_lock);
	if (eval) {
		rc = css_evaluate_new_subchannel(schid, 1);
		switch (rc) {
		case -EAGAIN:
			css_schedule_eval(schid);
			rc = 0;
			break;
		case -ENXIO:
		case -ENOMEM:
		case -EIO:
			/* These should abort looping */
			spin_lock_irq(&slow_subchannel_lock);
			idset_sch_del_subseq(slow_subchannel_set, schid);
			spin_unlock_irq(&slow_subchannel_lock);
			break;
		default:
			rc = 0;
		}
		/*
		 * Allow scheduling here since the containing loop might
		 * take a while.
		 */
		cond_resched();
	}
	return rc;
}

static void css_slow_path_func(struct work_struct *unused)
{
	unsigned long flags;

	CIO_TRACE_EVENT(4, "slowpath");
	for_each_subchannel_staged(slow_eval_known_fn, slow_eval_unknown_fn,
				   NULL);
	spin_lock_irqsave(&slow_subchannel_lock, flags);
	if (idset_is_empty(slow_subchannel_set)) {
		atomic_set(&css_eval_scheduled, 0);
		wake_up(&css_eval_wq);
	}
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

static DECLARE_DELAYED_WORK(slow_path_work, css_slow_path_func);
struct workqueue_struct *cio_work_q;

void css_schedule_eval(struct subchannel_id schid)
{
	unsigned long flags;

	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_sch_add(slow_subchannel_set, schid);
	atomic_set(&css_eval_scheduled, 1);
	queue_delayed_work(cio_work_q, &slow_path_work, 0);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

void css_schedule_eval_all(void)
{
	unsigned long flags;

	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_fill(slow_subchannel_set);
	atomic_set(&css_eval_scheduled, 1);
	queue_delayed_work(cio_work_q, &slow_path_work, 0);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

static int __unset_registered(struct device *dev, void *data)
{
	struct idset *set = data;
	struct subchannel *sch = to_subchannel(dev);

	idset_sch_del(set, sch->schid);
	return 0;
}

void css_schedule_eval_all_unreg(unsigned long delay)
{
	unsigned long flags;
	struct idset *unreg_set;

	/* Find unregistered subchannels. */
	unreg_set = idset_sch_new();
	if (!unreg_set) {
		/* Fallback. */
		css_schedule_eval_all();
		return;
	}
	idset_fill(unreg_set);
	bus_for_each_dev(&css_bus_type, NULL, unreg_set, __unset_registered);
	/* Apply to slow_subchannel_set. */
	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_add_set(slow_subchannel_set, unreg_set);
	atomic_set(&css_eval_scheduled, 1);
	queue_delayed_work(cio_work_q, &slow_path_work, delay);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
	idset_free(unreg_set);
}

void css_wait_for_slow_path(void)
{
	flush_workqueue(cio_work_q);
}

/* Schedule reprobing of all unregistered subchannels. */
void css_schedule_reprobe(void)
{
	/* Schedule with a delay to allow merging of subsequent calls. */
	css_schedule_eval_all_unreg(1 * HZ);
}
EXPORT_SYMBOL_GPL(css_schedule_reprobe);

/*
 * Called from the machine check handler for subchannel report words.
 */
static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
	struct subchannel_id mchk_schid;
	struct subchannel *sch;

	if (overflow) {
		css_schedule_eval_all();
		return;
	}
	CIO_CRW_EVENT(2, "CRW0 reports slct=%d, oflw=%d, "
		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
		      crw0->erc, crw0->rsid);
	if (crw1)
		CIO_CRW_EVENT(2, "CRW1 reports slct=%d, oflw=%d, "
			      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
			      crw1->slct, crw1->oflw, crw1->chn, crw1->rsc,
			      crw1->anc, crw1->erc, crw1->rsid);
	init_subchannel_id(&mchk_schid);
	mchk_schid.sch_no = crw0->rsid;
	if (crw1)
		mchk_schid.ssid = (crw1->rsid >> 4) & 3;

	if (crw0->erc == CRW_ERC_PMOD) {
		sch = get_subchannel_by_schid(mchk_schid);
		if (sch) {
			css_update_ssd_info(sch);
			put_device(&sch->dev);
		}
	}
	/*
	 * Since we are always presented with IPI in the CRW, we have to
	 * use stsch() to find out if the subchannel in question has come
	 * or gone.
	 */
	css_evaluate_subchannel(mchk_schid, 0);
}

static void __init
css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
{
	struct cpuid cpu_id;

	if (css_general_characteristics.mcss) {
		css->global_pgid.pgid_high.ext_cssid.version = 0x80;
		css->global_pgid.pgid_high.ext_cssid.cssid =
			css->id_valid ? css->cssid : 0;
	} else {
		css->global_pgid.pgid_high.cpu_addr = stap();
	}
	get_cpu_id(&cpu_id);
	css->global_pgid.cpu_id = cpu_id.ident;
	css->global_pgid.cpu_model = cpu_id.machine;
	css->global_pgid.tod_high = tod_high;
}

static void channel_subsystem_release(struct device *dev)
{
	struct channel_subsystem *css = to_css(dev);

	mutex_destroy(&css->mutex);
	kfree(css);
}

static ssize_t real_cssid_show(struct device *dev, struct device_attribute *a,
			       char *buf)
{
	struct channel_subsystem *css = to_css(dev);

	if (!css->id_valid)
		return -EINVAL;

	return sprintf(buf, "%x\n", css->cssid);
}
static DEVICE_ATTR_RO(real_cssid);

static ssize_t cm_enable_show(struct device *dev, struct device_attribute *a,
			      char *buf)
{
	struct channel_subsystem *css = to_css(dev);
	int ret;

	mutex_lock(&css->mutex);
	ret = sprintf(buf, "%x\n", css->cm_enabled);
	mutex_unlock(&css->mutex);
	return ret;
}

static ssize_t cm_enable_store(struct device *dev, struct device_attribute *a,
			       const char *buf, size_t count)
{
	struct channel_subsystem *css = to_css(dev);
	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;
	mutex_lock(&css->mutex);
	switch (val) {
	case 0:
		ret = css->cm_enabled ? chsc_secm(css, 0) : 0;
		break;
	case 1:
		ret = css->cm_enabled ? 0 : chsc_secm(css, 1);
		break;
	default:
		ret = -EINVAL;
	}
	mutex_unlock(&css->mutex);
	return ret < 0 ? ret : count;
}
static DEVICE_ATTR_RW(cm_enable);

static umode_t cm_enable_mode(struct kobject *kobj, struct attribute *attr,
			      int index)
{
	return css_chsc_characteristics.secm ? attr->mode : 0;
}

static struct attribute *cssdev_attrs[] = {
	&dev_attr_real_cssid.attr,
	NULL,
};

static struct attribute_group cssdev_attr_group = {
	.attrs = cssdev_attrs,
};

static struct attribute *cssdev_cm_attrs[] = {
	&dev_attr_cm_enable.attr,
	NULL,
};

static struct attribute_group cssdev_cm_attr_group = {
	.attrs = cssdev_cm_attrs,
	.is_visible = cm_enable_mode,
};

static const struct attribute_group *cssdev_attr_groups[] = {
	&cssdev_attr_group,
	&cssdev_cm_attr_group,
	NULL,
};

static int __init setup_css(int nr)
{
	struct channel_subsystem *css;
	int ret;

	css = kzalloc(sizeof(*css), GFP_KERNEL);
	if (!css)
		return -ENOMEM;

	channel_subsystems[nr] = css;
	dev_set_name(&css->device, "css%x", nr);
	css->device.groups = cssdev_attr_groups;
	css->device.release = channel_subsystem_release;
	/*
	 * We currently allocate notifier bits with this (using
	 * css->device as the device argument with the DMA API)
	 * and are fine with 64 bit addresses.
	 */
	css->device.coherent_dma_mask = DMA_BIT_MASK(64);
	css->device.dma_mask = &css->device.coherent_dma_mask;

	mutex_init(&css->mutex);
	ret = chsc_get_cssid_iid(nr, &css->cssid, &css->iid);
	if (!ret) {
		css->id_valid = true;
		pr_info("Partition identifier %01x.%01x\n", css->cssid,
			css->iid);
	}
	css_generate_pgid(css, (u32)(get_tod_clock() >> 32));

	ret = device_register(&css->device);
	if (ret) {
		put_device(&css->device);
		goto out_err;
	}

	css->pseudo_subchannel = kzalloc(sizeof(*css->pseudo_subchannel),
					 GFP_KERNEL);
	if (!css->pseudo_subchannel) {
		device_unregister(&css->device);
		ret = -ENOMEM;
		goto out_err;
	}

	css->pseudo_subchannel->dev.parent = &css->device;
	css->pseudo_subchannel->dev.release = css_subchannel_release;
	mutex_init(&css->pseudo_subchannel->reg_mutex);
	ret = css_sch_create_locks(css->pseudo_subchannel);
	if (ret) {
		kfree(css->pseudo_subchannel);
		device_unregister(&css->device);
		goto out_err;
	}

	dev_set_name(&css->pseudo_subchannel->dev, "defunct");
	ret = device_register(&css->pseudo_subchannel->dev);
	if (ret) {
		put_device(&css->pseudo_subchannel->dev);
		device_unregister(&css->device);
		goto out_err;
	}

	return ret;
out_err:
	channel_subsystems[nr] = NULL;
	return ret;
}

static int css_reboot_event(struct notifier_block *this,
			    unsigned long event,
			    void *ptr)
{
	struct channel_subsystem *css;
	int ret;

	ret = NOTIFY_DONE;
	for_each_css(css) {
		mutex_lock(&css->mutex);
		if (css->cm_enabled)
			if (chsc_secm(css, 0))
				ret = NOTIFY_BAD;
		mutex_unlock(&css->mutex);
	}

	return ret;
}

static struct notifier_block css_reboot_notifier = {
	.notifier_call = css_reboot_event,
};

#define CIO_DMA_GFP (GFP_KERNEL | __GFP_ZERO)
static struct gen_pool *cio_dma_pool;

/* Currently cio supports only a single css */
struct device *cio_get_dma_css_dev(void)
{
	return &channel_subsystems[0]->device;
}

struct gen_pool *cio_gp_dma_create(struct device *dma_dev, int nr_pages)
{
	struct gen_pool *gp_dma;
	void *cpu_addr;
	dma_addr_t dma_addr;
	int i;

	gp_dma = gen_pool_create(3, -1);
	if (!gp_dma)
		return NULL;
	for (i = 0; i < nr_pages; ++i) {
		cpu_addr = dma_alloc_coherent(dma_dev, PAGE_SIZE, &dma_addr,
					      CIO_DMA_GFP);
		if (!cpu_addr)
			return gp_dma;
		gen_pool_add_virt(gp_dma, (unsigned long) cpu_addr,
				  dma_addr, PAGE_SIZE, -1);
	}
	return gp_dma;
}

static void __gp_dma_free_dma(struct gen_pool *pool,
			      struct gen_pool_chunk *chunk, void *data)
{
	size_t chunk_size = chunk->end_addr - chunk->start_addr + 1;

	dma_free_coherent((struct device *) data, chunk_size,
			  (void *) chunk->start_addr,
			  (dma_addr_t) chunk->phys_addr);
}

void cio_gp_dma_destroy(struct gen_pool *gp_dma, struct device *dma_dev)
{
	if (!gp_dma)
		return;
	/* this is quite ugly but no better idea */
	gen_pool_for_each_chunk(gp_dma, __gp_dma_free_dma, dma_dev);
	gen_pool_destroy(gp_dma);
}

static int cio_dma_pool_init(void)
{
	/* No need to free up the resources: compiled in */
	cio_dma_pool = cio_gp_dma_create(cio_get_dma_css_dev(), 1);
	if (!cio_dma_pool)
		return -ENOMEM;
	return 0;
}

void *cio_gp_dma_zalloc(struct gen_pool *gp_dma, struct device *dma_dev,
			size_t size)
{
	dma_addr_t dma_addr;
	unsigned long addr;
	size_t chunk_size;

	if (!gp_dma)
		return NULL;
	addr = gen_pool_alloc(gp_dma, size);
	while (!addr) {
		chunk_size = round_up(size, PAGE_SIZE);
		addr = (unsigned long) dma_alloc_coherent(dma_dev,
					chunk_size, &dma_addr, CIO_DMA_GFP);
		if (!addr)
			return NULL;
		gen_pool_add_virt(gp_dma, addr, dma_addr, chunk_size, -1);
		addr = gen_pool_alloc(gp_dma, size);
	}
	return (void *) addr;
}

void cio_gp_dma_free(struct gen_pool *gp_dma, void *cpu_addr, size_t size)
{
	if (!cpu_addr)
		return;
	memset(cpu_addr, 0, size);
	gen_pool_free(gp_dma, (unsigned long) cpu_addr, size);
}

/*
 * Allocate dma memory from the css global pool. Intended for memory not
 * specific to any single device within the css. The allocated memory
 * is not guaranteed to be 31-bit addressable.
 *
 * Caution: Not suitable for early stuff like console.
 */
void *cio_dma_zalloc(size_t size)
{
	return cio_gp_dma_zalloc(cio_dma_pool, cio_get_dma_css_dev(), size);
}

void cio_dma_free(void *cpu_addr, size_t size)
{
	cio_gp_dma_free(cio_dma_pool, cpu_addr, size);
}
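
/*
 * Usage sketch (illustrative): allocate zeroed DMA memory from the css
 * global pool and release it again.
 *
 *	void *buf = cio_dma_zalloc(64);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	cio_dma_free(buf, 64);
 */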

/*
 * Now that the driver core is running, we can set up our channel subsystem.
 * The struct subchannel instances are created during probing.
 */
static int __init css_bus_init(void)
{
	int ret, i;

	ret = chsc_init();
	if (ret)
		return ret;

	chsc_determine_css_characteristics();
	/* Try to enable MSS. */
	ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
	if (ret)
		max_ssid = 0;
	else /* Success. */
		max_ssid = __MAX_SSID;

	ret = slow_subchannel_init();
	if (ret)
		goto out;

	ret = crw_register_handler(CRW_RSC_SCH, css_process_crw);
	if (ret)
		goto out;

	if ((ret = bus_register(&css_bus_type)))
		goto out;

	/* Setup css structure. */
	for (i = 0; i <= MAX_CSS_IDX; i++) {
		ret = setup_css(i);
		if (ret)
			goto out_unregister;
	}
	ret = register_reboot_notifier(&css_reboot_notifier);
	if (ret)
		goto out_unregister;
	ret = cio_dma_pool_init();
	if (ret)
		goto out_unregister_rn;
	airq_init();
	css_init_done = 1;

	/* Enable default isc for I/O subchannels. */
	isc_register(IO_SCH_ISC);

	return 0;
out_unregister_rn:
	unregister_reboot_notifier(&css_reboot_notifier);
out_unregister:
	while (i-- > 0) {
		struct channel_subsystem *css = channel_subsystems[i];

		device_unregister(&css->pseudo_subchannel->dev);
		device_unregister(&css->device);
	}
	bus_unregister(&css_bus_type);
out:
	crw_unregister_handler(CRW_RSC_SCH);
	idset_free(slow_subchannel_set);
	chsc_init_cleanup();
	pr_alert("The CSS device driver initialization failed with "
		 "errno=%d\n", ret);
	return ret;
}

static void __init css_bus_cleanup(void)
{
	struct channel_subsystem *css;

	for_each_css(css) {
		device_unregister(&css->pseudo_subchannel->dev);
		device_unregister(&css->device);
	}
	bus_unregister(&css_bus_type);
	crw_unregister_handler(CRW_RSC_SCH);
	idset_free(slow_subchannel_set);
	chsc_init_cleanup();
	isc_unregister(IO_SCH_ISC);
}

static int __init channel_subsystem_init(void)
{
	int ret;

	ret = css_bus_init();
	if (ret)
		return ret;
	cio_work_q = create_singlethread_workqueue("cio");
	if (!cio_work_q) {
		ret = -ENOMEM;
		goto out_bus;
	}
	ret = io_subchannel_init();
	if (ret)
		goto out_wq;

	/* Register subchannels which are already in use. */
	cio_register_early_subchannels();
	/* Start initial subchannel evaluation. */
	css_schedule_eval_all();

	return ret;
out_wq:
	destroy_workqueue(cio_work_q);
out_bus:
	css_bus_cleanup();
	return ret;
}
subsys_initcall(channel_subsystem_init);

static int css_settle(struct device_driver *drv, void *unused)
{
	struct css_driver *cssdrv = to_cssdriver(drv);

	if (cssdrv->settle)
		return cssdrv->settle();
	return 0;
}

int css_complete_work(void)
{
	int ret;

	/* Wait for the evaluation of subchannels to finish. */
	ret = wait_event_interruptible(css_eval_wq,
				       atomic_read(&css_eval_scheduled) == 0);
	if (ret)
		return -EINTR;
	flush_workqueue(cio_work_q);
	/* Wait for the subchannel type specific initialization to finish */
	return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle);
}

/*
 * Wait for the initialization of devices to finish, to make sure we are
 * done with our setup if the search for the root device starts.
 */
static int __init channel_subsystem_init_sync(void)
{
	css_complete_work();
	return 0;
}
subsys_initcall_sync(channel_subsystem_init_sync);

#ifdef CONFIG_PROC_FS
static ssize_t cio_settle_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	int ret;

	/* Handle pending CRW's. */
	crw_wait_for_channel_report();
	ret = css_complete_work();

	return ret ? ret : count;
}

static const struct proc_ops cio_settle_proc_ops = {
	.proc_open	= nonseekable_open,
	.proc_write	= cio_settle_write,
	.proc_lseek	= no_llseek,
};

static int __init cio_settle_init(void)
{
	struct proc_dir_entry *entry;

	entry = proc_create("cio_settle", S_IWUSR, NULL, &cio_settle_proc_ops);
	if (!entry)
		return -ENOMEM;
	return 0;
}
device_initcall(cio_settle_init);
#endif /*CONFIG_PROC_FS*/
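
/*
 * Userspace sketch (illustrative): writing anything to /proc/cio_settle
 * blocks until pending channel report words are processed and subchannel
 * evaluation has settled, e.g.:
 *
 *	echo 1 > /proc/cio_settle
 */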

int sch_is_pseudo_sch(struct subchannel *sch)
{
	if (!sch->dev.parent)
		return 0;

	return sch == to_css(sch->dev.parent)->pseudo_subchannel;
}

static int css_bus_match(struct device *dev, struct device_driver *drv)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *driver = to_cssdriver(drv);
	struct css_device_id *id;

	/* When driver_override is set, only bind to the matching driver */
	if (sch->driver_override && strcmp(sch->driver_override, drv->name))
		return 0;

	for (id = driver->subchannel_type; id->match_flags; id++) {
		if (sch->st == id->type)
			return 1;
	}

	return 0;
}

static int css_probe(struct device *dev)
{
	struct subchannel *sch;
	int ret;

	sch = to_subchannel(dev);
	sch->driver = to_cssdriver(dev->driver);
	ret = sch->driver->probe ? sch->driver->probe(sch) : 0;
	if (ret)
		sch->driver = NULL;
	return ret;
}

static int css_remove(struct device *dev)
{
	struct subchannel *sch;
	int ret;

	sch = to_subchannel(dev);
	ret = sch->driver->remove ? sch->driver->remove(sch) : 0;
	sch->driver = NULL;
	return ret;
}

static void css_shutdown(struct device *dev)
{
	struct subchannel *sch;

	sch = to_subchannel(dev);
	if (sch->driver && sch->driver->shutdown)
		sch->driver->shutdown(sch);
}

static int css_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct subchannel *sch = to_subchannel(dev);
	int ret;

	ret = add_uevent_var(env, "ST=%01X", sch->st);
	if (ret)
		return ret;
	ret = add_uevent_var(env, "MODALIAS=css:t%01X", sch->st);
	return ret;
}

static struct bus_type css_bus_type = {
	.name     = "css",
	.match    = css_bus_match,
	.probe    = css_probe,
	.remove   = css_remove,
	.shutdown = css_shutdown,
	.uevent   = css_uevent,
};

/**
 * css_driver_register - register a css driver
 * @cdrv: css driver to register
 *
 * This is mainly a wrapper around driver_register that sets name
 * and bus_type in the embedded struct device_driver correctly.
 */
int css_driver_register(struct css_driver *cdrv)
{
	cdrv->drv.bus = &css_bus_type;
	return driver_register(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_register);

/**
 * css_driver_unregister - unregister a css driver
 * @cdrv: css driver to unregister
 *
 * This is a wrapper around driver_unregister.
 */
void css_driver_unregister(struct css_driver *cdrv)
{
	driver_unregister(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_unregister);
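
/*
 * Usage sketch (illustrative, with hypothetical names): a subchannel
 * driver fills in a struct css_driver with a subchannel-type table and
 * registers it on the css bus.
 *
 *	static struct css_device_id my_subchannel_ids[] = {
 *		{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
 *		{ },	// end of list
 *	};
 *
 *	static struct css_driver my_css_driver = {
 *		.drv = {
 *			.name = "my_sch",
 *			.owner = THIS_MODULE,
 *		},
 *		.subchannel_type = my_subchannel_ids,
 *	};
 *
 *	ret = css_driver_register(&my_css_driver);
 */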