// SPDX-License-Identifier: GPL-1.0+
/*
 * bus driver for ccw devices
 *
 * Copyright IBM Corp. 2002, 2008
 * Author(s): Arnd Bergmann (arndb@de.ibm.com)
 *	      Cornelia Huck (cornelia.huck@de.ibm.com)
 *	      Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/export.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/device.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/kernel_stat.h>
#include <linux/sched/signal.h>
#include <linux/dma-mapping.h>

#include <asm/ccwdev.h>
#include <asm/cio.h>
#include <asm/param.h>		/* HZ */
#include <asm/cmb.h>
#include <asm/isc.h>

#include "chp.h"
#include "cio.h"
#include "cio_debug.h"
#include "css.h"
#include "device.h"
#include "ioasm.h"
#include "io_sch.h"
#include "blacklist.h"
#include "chsc.h"

static struct timer_list recovery_timer;
static DEFINE_SPINLOCK(recovery_lock);
static int recovery_phase;
static const unsigned long recovery_delay[] = { 3, 30, 300 };

static atomic_t ccw_device_init_count = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(ccw_device_init_wq);
static struct bus_type ccw_bus_type;

/******************* bus type handling ***********************/

/* The Linux driver model distinguishes between a bus type and
 * the bus itself. Of course we only have one channel
 * subsystem driver and one channel system per machine, but
 * we still use the abstraction. T.R. says it's a good idea. */
static int
ccw_bus_match(struct device *dev, struct device_driver *drv)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_driver *cdrv = to_ccwdrv(drv);
	const struct ccw_device_id *ids = cdrv->ids, *found;

	if (!ids)
		return 0;

	found = ccw_device_id_match(ids, &cdev->id);
	if (!found)
		return 0;

	cdev->id.driver_info = found->driver_info;

	return 1;
}
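
/*
 * Illustrative driver-side counterpart to the matching above (hypothetical
 * driver and id values, not part of this file): a ccw driver advertises the
 * devices it supports via a ccw_device_id table, e.g.
 *
 *	static struct ccw_device_id foo_ids[] = {
 *		{ CCW_DEVICE_DEVTYPE(0x3990, 0, 0x3390, 0) },
 *		{ },
 *	};
 *	MODULE_DEVICE_TABLE(ccw, foo_ids);
 *
 * ccw_bus_match() compares these entries against the sensed device id.
 */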

/* Store modalias string delimited by prefix/suffix string into buffer with
 * specified size. Return length of resulting string (excluding trailing '\0')
 * even if string doesn't fit buffer (snprintf semantics). */
static int snprint_alias(char *buf, size_t size,
			 struct ccw_device_id *id, const char *suffix)
{
	int len;

	len = snprintf(buf, size, "ccw:t%04Xm%02X", id->cu_type, id->cu_model);
	if (len > size)
		return len;
	buf += len;
	size -= len;

	if (id->dev_type != 0)
		len += snprintf(buf, size, "dt%04Xdm%02X%s", id->dev_type,
				id->dev_model, suffix);
	else
		len += snprintf(buf, size, "dtdm%s", suffix);

	return len;
}
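
/*
 * Example (hypothetical id values): for cu_type 0x3990, cu_model 0xe9,
 * dev_type 0x3390 and dev_model 0x0c the function above produces
 * "ccw:t3990mE9dt3390dm0C"; a device without a distinct device type
 * yields "ccw:t3990mE9dtdm" instead.
 */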

/* Set up environment variables for ccw device uevent. Return 0 on success,
 * non-zero otherwise. */
static int ccw_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_device_id *id = &(cdev->id);
	int ret;
	char modalias_buf[30];

	/* CU_TYPE= */
	ret = add_uevent_var(env, "CU_TYPE=%04X", id->cu_type);
	if (ret)
		return ret;

	/* CU_MODEL= */
	ret = add_uevent_var(env, "CU_MODEL=%02X", id->cu_model);
	if (ret)
		return ret;

	/* The next two can be zero, that's ok for us */
	/* DEV_TYPE= */
	ret = add_uevent_var(env, "DEV_TYPE=%04X", id->dev_type);
	if (ret)
		return ret;

	/* DEV_MODEL= */
	ret = add_uevent_var(env, "DEV_MODEL=%02X", id->dev_model);
	if (ret)
		return ret;

	/* MODALIAS= */
	snprint_alias(modalias_buf, sizeof(modalias_buf), id, "");
	ret = add_uevent_var(env, "MODALIAS=%s", modalias_buf);
	return ret;
}

static void io_subchannel_irq(struct subchannel *);
static int io_subchannel_probe(struct subchannel *);
static int io_subchannel_remove(struct subchannel *);
static void io_subchannel_shutdown(struct subchannel *);
static int io_subchannel_sch_event(struct subchannel *, int);
static int io_subchannel_chp_event(struct subchannel *, struct chp_link *,
				   int);
static void recovery_func(struct timer_list *unused);

static struct css_device_id io_subchannel_ids[] = {
	{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
	{ /* end of list */ },
};

static int io_subchannel_prepare(struct subchannel *sch)
{
	struct ccw_device *cdev;
	/*
	 * Don't allow suspend while a ccw device registration
	 * is still outstanding.
	 */
	cdev = sch_get_cdev(sch);
	if (cdev && !device_is_registered(&cdev->dev))
		return -EAGAIN;
	return 0;
}

static int io_subchannel_settle(void)
{
	int ret;

	ret = wait_event_interruptible(ccw_device_init_wq,
				atomic_read(&ccw_device_init_count) == 0);
	if (ret)
		return -EINTR;
	flush_workqueue(cio_work_q);
	return 0;
}

static struct css_driver io_subchannel_driver = {
	.drv = {
		.owner = THIS_MODULE,
		.name = "io_subchannel",
	},
	.subchannel_type = io_subchannel_ids,
	.irq = io_subchannel_irq,
	.sch_event = io_subchannel_sch_event,
	.chp_event = io_subchannel_chp_event,
	.probe = io_subchannel_probe,
	.remove = io_subchannel_remove,
	.shutdown = io_subchannel_shutdown,
	.prepare = io_subchannel_prepare,
	.settle = io_subchannel_settle,
};

int __init io_subchannel_init(void)
{
	int ret;

	timer_setup(&recovery_timer, recovery_func, 0);
	ret = bus_register(&ccw_bus_type);
	if (ret)
		return ret;
	ret = css_driver_register(&io_subchannel_driver);
	if (ret)
		bus_unregister(&ccw_bus_type);

	return ret;
}

/************************ device handling **************************/

static ssize_t
devtype_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_device_id *id = &(cdev->id);

	if (id->dev_type != 0)
		return sprintf(buf, "%04x/%02x\n",
			       id->dev_type, id->dev_model);
	else
		return sprintf(buf, "n/a\n");
}

static ssize_t
cutype_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_device_id *id = &(cdev->id);

	return sprintf(buf, "%04x/%02x\n",
		       id->cu_type, id->cu_model);
}

static ssize_t
modalias_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_device_id *id = &(cdev->id);
	int len;

	len = snprint_alias(buf, PAGE_SIZE, id, "\n");

	return len > PAGE_SIZE ? PAGE_SIZE : len;
}

static ssize_t
online_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);

	return sprintf(buf, cdev->online ? "1\n" : "0\n");
}

int ccw_device_is_orphan(struct ccw_device *cdev)
{
	return sch_is_pseudo_sch(to_subchannel(cdev->dev.parent));
}

static void ccw_device_unregister(struct ccw_device *cdev)
{
	if (device_is_registered(&cdev->dev)) {
		/* Undo device_add(). */
		device_del(&cdev->dev);
	}
	if (cdev->private->flags.initialized) {
		cdev->private->flags.initialized = 0;
		/* Release reference from device_initialize(). */
		put_device(&cdev->dev);
	}
}

static void io_subchannel_quiesce(struct subchannel *);

/**
 * ccw_device_set_offline() - disable a ccw device for I/O
 * @cdev: target ccw device
 *
 * This function calls the driver's set_offline() function for @cdev, if
 * given, and then disables @cdev.
 * Returns:
 *   %0 on success and a negative error value on failure.
 * Context:
 *  enabled, ccw device lock not held
 */
int ccw_device_set_offline(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret, state;

	if (!cdev)
		return -ENODEV;
	if (!cdev->online || !cdev->drv)
		return -EINVAL;

	if (cdev->drv->set_offline) {
		ret = cdev->drv->set_offline(cdev);
		if (ret != 0)
			return ret;
	}
	spin_lock_irq(cdev->ccwlock);
	sch = to_subchannel(cdev->dev.parent);
	cdev->online = 0;
	/* Wait until a final state or DISCONNECTED is reached */
	while (!dev_fsm_final_state(cdev) &&
	       cdev->private->state != DEV_STATE_DISCONNECTED) {
		spin_unlock_irq(cdev->ccwlock);
		wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
			   cdev->private->state == DEV_STATE_DISCONNECTED));
		spin_lock_irq(cdev->ccwlock);
	}
	do {
		ret = ccw_device_offline(cdev);
		if (!ret)
			break;
		CIO_MSG_EVENT(0, "ccw_device_offline returned %d, device "
			      "0.%x.%04x\n", ret, cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno);
		if (ret != -EBUSY)
			goto error;
		state = cdev->private->state;
		spin_unlock_irq(cdev->ccwlock);
		io_subchannel_quiesce(sch);
		spin_lock_irq(cdev->ccwlock);
		cdev->private->state = state;
	} while (ret == -EBUSY);
	spin_unlock_irq(cdev->ccwlock);
	wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
		   cdev->private->state == DEV_STATE_DISCONNECTED));
	/* Inform the user if set offline failed. */
	if (cdev->private->state == DEV_STATE_BOXED) {
		pr_warn("%s: The device entered boxed state while being set offline\n",
			dev_name(&cdev->dev));
	} else if (cdev->private->state == DEV_STATE_NOT_OPER) {
		pr_warn("%s: The device stopped operating while being set offline\n",
			dev_name(&cdev->dev));
	}
	/* Give up reference from ccw_device_set_online(). */
	put_device(&cdev->dev);
	return 0;

error:
	cdev->private->state = DEV_STATE_OFFLINE;
	dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
	spin_unlock_irq(cdev->ccwlock);
	/* Give up reference from ccw_device_set_online(). */
	put_device(&cdev->dev);
	return -ENODEV;
}
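
/*
 * Usage sketch (hypothetical caller, not part of this file): a ccw driver's
 * teardown path would typically take the device offline like this:
 *
 *	if (ccw_device_set_offline(cdev))
 *		dev_warn(&cdev->dev, "could not set device offline\n");
 *
 * The call must be made without holding the ccw device lock.
 */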

/**
 * ccw_device_set_online() - enable a ccw device for I/O
 * @cdev: target ccw device
 *
 * This function first enables @cdev and then calls the driver's set_online()
 * function for @cdev, if given. If set_online() returns an error, @cdev is
 * disabled again.
 * Returns:
 *   %0 on success and a negative error value on failure.
 * Context:
 *  enabled, ccw device lock not held
 */
int ccw_device_set_online(struct ccw_device *cdev)
{
	int ret;
	int ret2;

	if (!cdev)
		return -ENODEV;
	if (cdev->online || !cdev->drv)
		return -EINVAL;
	/* Hold on to an extra reference while device is online. */
	if (!get_device(&cdev->dev))
		return -ENODEV;

	spin_lock_irq(cdev->ccwlock);
	ret = ccw_device_online(cdev);
	spin_unlock_irq(cdev->ccwlock);
	if (ret == 0)
		wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
	else {
		CIO_MSG_EVENT(0, "ccw_device_online returned %d, "
			      "device 0.%x.%04x\n",
			      ret, cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno);
		/* Give up online reference since onlining failed. */
		put_device(&cdev->dev);
		return ret;
	}
	spin_lock_irq(cdev->ccwlock);
	/* Check if online processing was successful */
	if ((cdev->private->state != DEV_STATE_ONLINE) &&
	    (cdev->private->state != DEV_STATE_W4SENSE)) {
		spin_unlock_irq(cdev->ccwlock);
		/* Inform the user that set online failed. */
		if (cdev->private->state == DEV_STATE_BOXED) {
			pr_warn("%s: Setting the device online failed because it is boxed\n",
				dev_name(&cdev->dev));
		} else if (cdev->private->state == DEV_STATE_NOT_OPER) {
			pr_warn("%s: Setting the device online failed because it is not operational\n",
				dev_name(&cdev->dev));
		}
		/* Give up online reference since onlining failed. */
		put_device(&cdev->dev);
		return -ENODEV;
	}
	spin_unlock_irq(cdev->ccwlock);
	if (cdev->drv->set_online)
		ret = cdev->drv->set_online(cdev);
	if (ret)
		goto rollback;

	spin_lock_irq(cdev->ccwlock);
	cdev->online = 1;
	spin_unlock_irq(cdev->ccwlock);
	return 0;

rollback:
	spin_lock_irq(cdev->ccwlock);
	/* Wait until a final state or DISCONNECTED is reached */
	while (!dev_fsm_final_state(cdev) &&
	       cdev->private->state != DEV_STATE_DISCONNECTED) {
		spin_unlock_irq(cdev->ccwlock);
		wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
			   cdev->private->state == DEV_STATE_DISCONNECTED));
		spin_lock_irq(cdev->ccwlock);
	}
	ret2 = ccw_device_offline(cdev);
	if (ret2)
		goto error;
	spin_unlock_irq(cdev->ccwlock);
	wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
		   cdev->private->state == DEV_STATE_DISCONNECTED));
	/* Give up online reference since onlining failed. */
	put_device(&cdev->dev);
	return ret;

error:
	CIO_MSG_EVENT(0, "rollback ccw_device_offline returned %d, "
		      "device 0.%x.%04x\n",
		      ret2, cdev->private->dev_id.ssid,
		      cdev->private->dev_id.devno);
	cdev->private->state = DEV_STATE_OFFLINE;
	spin_unlock_irq(cdev->ccwlock);
	/* Give up online reference since onlining failed. */
	put_device(&cdev->dev);
	return ret;
}
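
/*
 * Usage sketch (hypothetical caller, not part of this file): drivers or
 * configuration code bring a recognized device into use via:
 *
 *	int rc = ccw_device_set_online(cdev);
 *	if (rc)
 *		dev_warn(&cdev->dev, "set online failed: %d\n", rc);
 *
 * On failure the device is rolled back to the offline state, so no cleanup
 * beyond error reporting is expected from the caller.
 */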

static int online_store_handle_offline(struct ccw_device *cdev)
{
	if (cdev->private->state == DEV_STATE_DISCONNECTED) {
		spin_lock_irq(cdev->ccwlock);
		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG_EVAL);
		spin_unlock_irq(cdev->ccwlock);
		return 0;
	}
	if (cdev->drv && cdev->drv->set_offline)
		return ccw_device_set_offline(cdev);
	return -EINVAL;
}

static int online_store_recog_and_online(struct ccw_device *cdev)
{
	/* Do device recognition, if needed. */
	if (cdev->private->state == DEV_STATE_BOXED) {
		spin_lock_irq(cdev->ccwlock);
		ccw_device_recognition(cdev);
		spin_unlock_irq(cdev->ccwlock);
		wait_event(cdev->private->wait_q,
			   cdev->private->flags.recog_done);
		if (cdev->private->state != DEV_STATE_OFFLINE)
			/* recognition failed */
			return -EAGAIN;
	}
	if (cdev->drv && cdev->drv->set_online)
		return ccw_device_set_online(cdev);
	return -EINVAL;
}

static int online_store_handle_online(struct ccw_device *cdev, int force)
{
	int ret;

	ret = online_store_recog_and_online(cdev);
	if (ret && !force)
		return ret;
	if (force && cdev->private->state == DEV_STATE_BOXED) {
		ret = ccw_device_stlck(cdev);
		if (ret)
			return ret;
		if (cdev->id.cu_type == 0)
			cdev->private->state = DEV_STATE_NOT_OPER;
		ret = online_store_recog_and_online(cdev);
		if (ret)
			return ret;
	}
	return 0;
}
static ssize_t online_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	int force, ret;
	unsigned long i;

	/* Prevent conflict between multiple on-/offline processing requests. */
	if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0)
		return -EAGAIN;
	/* Prevent conflict between internal I/Os and on-/offline processing. */
	if (!dev_fsm_final_state(cdev) &&
	    cdev->private->state != DEV_STATE_DISCONNECTED) {
		ret = -EAGAIN;
		goto out;
	}
	/* Prevent conflict between pending work and on-/offline processing.*/
	if (work_pending(&cdev->private->todo_work)) {
		ret = -EAGAIN;
		goto out;
	}
	if (!strncmp(buf, "force\n", count)) {
		force = 1;
		i = 1;
		ret = 0;
	} else {
		force = 0;
		ret = kstrtoul(buf, 16, &i);
	}
	if (ret)
		goto out;

	device_lock(dev);
	switch (i) {
	case 0:
		ret = online_store_handle_offline(cdev);
		break;
	case 1:
		ret = online_store_handle_online(cdev, force);
		break;
	default:
		ret = -EINVAL;
	}
	device_unlock(dev);

out:
	atomic_set(&cdev->private->onoff, 0);
	return (ret < 0) ? ret : count;
}
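
/*
 * Example (from user space, device bus id is illustrative): the "online"
 * attribute handled above is driven via sysfs, e.g.
 *
 *	echo 1     > /sys/bus/ccw/devices/0.0.1234/online   # set online
 *	echo 0     > /sys/bus/ccw/devices/0.0.1234/online   # set offline
 *	echo force > /sys/bus/ccw/devices/0.0.1234/online   # force a boxed device online
 */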
static ssize_t
available_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct subchannel *sch;

	if (ccw_device_is_orphan(cdev))
		return sprintf(buf, "no device\n");
	switch (cdev->private->state) {
	case DEV_STATE_BOXED:
		return sprintf(buf, "boxed\n");
	case DEV_STATE_DISCONNECTED:
	case DEV_STATE_DISCONNECTED_SENSE_ID:
	case DEV_STATE_NOT_OPER:
		sch = to_subchannel(dev->parent);
		if (!sch->lpm)
			return sprintf(buf, "no path\n");
		else
			return sprintf(buf, "no device\n");
	default:
		/* All other states considered fine. */
		return sprintf(buf, "good\n");
	}
}

static ssize_t
initiate_logging(struct device *dev, struct device_attribute *attr,
		 const char *buf, size_t count)
{
	struct subchannel *sch = to_subchannel(dev);
	int rc;

	rc = chsc_siosl(sch->schid);
	if (rc < 0) {
		pr_warn("Logging for subchannel 0.%x.%04x failed with errno=%d\n",
			sch->schid.ssid, sch->schid.sch_no, rc);
		return rc;
	}
	pr_notice("Logging for subchannel 0.%x.%04x was triggered\n",
		  sch->schid.ssid, sch->schid.sch_no);
	return count;
}
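
/*
 * Example (subchannel bus id is illustrative): the write-only "logging"
 * attribute created from this handler triggers SIOSL from user space, e.g.
 *
 *	echo 1 > /sys/devices/css0/0.0.0042/logging
 */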

static ssize_t vpm_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sprintf(buf, "%02x\n", sch->vpm);
}

static DEVICE_ATTR_RO(devtype);
static DEVICE_ATTR_RO(cutype);
static DEVICE_ATTR_RO(modalias);
static DEVICE_ATTR_RW(online);
static DEVICE_ATTR(availability, 0444, available_show, NULL);
static DEVICE_ATTR(logging, 0200, NULL, initiate_logging);
static DEVICE_ATTR_RO(vpm);

static struct attribute *io_subchannel_attrs[] = {
	&dev_attr_logging.attr,
	&dev_attr_vpm.attr,
	NULL,
};

static const struct attribute_group io_subchannel_attr_group = {
	.attrs = io_subchannel_attrs,
};

static struct attribute *ccwdev_attrs[] = {
	&dev_attr_devtype.attr,
	&dev_attr_cutype.attr,
	&dev_attr_modalias.attr,
	&dev_attr_online.attr,
	&dev_attr_cmb_enable.attr,
	&dev_attr_availability.attr,
	NULL,
};

static const struct attribute_group ccwdev_attr_group = {
	.attrs = ccwdev_attrs,
};

static const struct attribute_group *ccwdev_attr_groups[] = {
	&ccwdev_attr_group,
	NULL,
};

static int ccw_device_add(struct ccw_device *cdev)
{
	struct device *dev = &cdev->dev;

	dev->bus = &ccw_bus_type;
	return device_add(dev);
}

static int match_dev_id(struct device *dev, const void *data)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_dev_id *dev_id = (void *)data;

	return ccw_dev_id_is_equal(&cdev->private->dev_id, dev_id);
}

/**
 * get_ccwdev_by_dev_id() - obtain device from a ccw device id
 * @dev_id: id of the device to be searched
 *
 * This function searches all devices attached to the ccw bus for a device
 * matching @dev_id.
 * Returns:
 *  If a device is found its reference count is increased and returned;
 *  else %NULL is returned.
 */
struct ccw_device *get_ccwdev_by_dev_id(struct ccw_dev_id *dev_id)
{
	struct device *dev;

	dev = bus_find_device(&ccw_bus_type, NULL, dev_id, match_dev_id);

	return dev ? to_ccwdev(dev) : NULL;
}
EXPORT_SYMBOL_GPL(get_ccwdev_by_dev_id);
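
/*
 * Usage sketch (hypothetical caller and id values, not part of this file):
 * look up a ccw device by its id and drop the acquired reference when done:
 *
 *	struct ccw_dev_id id = { .ssid = 0, .devno = 0x1234 };
 *	struct ccw_device *cdev = get_ccwdev_by_dev_id(&id);
 *
 *	if (cdev) {
 *		... use cdev ...
 *		put_device(&cdev->dev);
 *	}
 */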

static void ccw_device_do_unbind_bind(struct ccw_device *cdev)
{
	int ret;

	if (device_is_registered(&cdev->dev)) {
		device_release_driver(&cdev->dev);
		ret = device_attach(&cdev->dev);
		WARN_ON(ret == -ENODEV);
	}
}

static void
ccw_device_release(struct device *dev)
{
	struct ccw_device *cdev;

	cdev = to_ccwdev(dev);
	cio_gp_dma_free(cdev->private->dma_pool, cdev->private->dma_area,
			sizeof(*cdev->private->dma_area));
	cio_gp_dma_destroy(cdev->private->dma_pool, &cdev->dev);
	/* Release reference of parent subchannel. */
	put_device(cdev->dev.parent);
	kfree(cdev->private);
	kfree(cdev);
}

static struct ccw_device *io_subchannel_allocate_dev(struct subchannel *sch)
{
	struct ccw_device *cdev;
	struct gen_pool *dma_pool;

	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
	if (!cdev)
		goto err_cdev;
	cdev->private = kzalloc(sizeof(struct ccw_device_private),
				GFP_KERNEL | GFP_DMA);
	if (!cdev->private)
		goto err_priv;
	cdev->dev.coherent_dma_mask = sch->dev.coherent_dma_mask;
	cdev->dev.dma_mask = sch->dev.dma_mask;
	dma_pool = cio_gp_dma_create(&cdev->dev, 1);
	if (!dma_pool)
		goto err_dma_pool;
	cdev->private->dma_pool = dma_pool;
	cdev->private->dma_area = cio_gp_dma_zalloc(dma_pool, &cdev->dev,
					sizeof(*cdev->private->dma_area));
	if (!cdev->private->dma_area)
		goto err_dma_area;
	return cdev;
err_dma_area:
	cio_gp_dma_destroy(dma_pool, &cdev->dev);
err_dma_pool:
	kfree(cdev->private);
err_priv:
	kfree(cdev);
err_cdev:
	return ERR_PTR(-ENOMEM);
}

static void ccw_device_todo(struct work_struct *work);

static int io_subchannel_initialize_dev(struct subchannel *sch,
					struct ccw_device *cdev)
{
	struct ccw_device_private *priv = cdev->private;
	int ret;

	priv->cdev = cdev;
	priv->int_class = IRQIO_CIO;
	priv->state = DEV_STATE_NOT_OPER;
	priv->dev_id.devno = sch->schib.pmcw.dev;
	priv->dev_id.ssid = sch->schid.ssid;

	INIT_WORK(&priv->todo_work, ccw_device_todo);
	INIT_LIST_HEAD(&priv->cmb_list);
	init_waitqueue_head(&priv->wait_q);
	timer_setup(&priv->timer, ccw_device_timeout, 0);

	atomic_set(&priv->onoff, 0);
	cdev->ccwlock = sch->lock;
	cdev->dev.parent = &sch->dev;
	cdev->dev.release = ccw_device_release;
	cdev->dev.groups = ccwdev_attr_groups;
	/* Do first half of device_register. */
	device_initialize(&cdev->dev);
	ret = dev_set_name(&cdev->dev, "0.%x.%04x", cdev->private->dev_id.ssid,
			   cdev->private->dev_id.devno);
	if (ret)
		goto out_put;
	if (!get_device(&sch->dev)) {
		ret = -ENODEV;
		goto out_put;
	}
	priv->flags.initialized = 1;
	spin_lock_irq(sch->lock);
	sch_set_cdev(sch, cdev);
	spin_unlock_irq(sch->lock);
	return 0;

out_put:
	/* Release reference from device_initialize(). */
	put_device(&cdev->dev);
	return ret;
}

static struct ccw_device *io_subchannel_create_ccwdev(struct subchannel *sch)
{
	struct ccw_device *cdev;
	int ret;

	cdev = io_subchannel_allocate_dev(sch);
	if (!IS_ERR(cdev)) {
		ret = io_subchannel_initialize_dev(sch, cdev);
		if (ret)
			cdev = ERR_PTR(ret);
	}
	return cdev;
}

static void io_subchannel_recog(struct ccw_device *, struct subchannel *);

static void sch_create_and_recog_new_device(struct subchannel *sch)
{
	struct ccw_device *cdev;

	/* Need to allocate a new ccw device. */
	cdev = io_subchannel_create_ccwdev(sch);
	if (IS_ERR(cdev)) {
		/* OK, we did everything we could... */
		css_sch_device_unregister(sch);
		return;
	}
	/* Start recognition for the new ccw device. */
	io_subchannel_recog(cdev, sch);
}

/*
 * Register recognized device.
 */
static void io_subchannel_register(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret, adjust_init_count = 1;
	unsigned long flags;

	sch = to_subchannel(cdev->dev.parent);
	/*
	 * Check if subchannel is still registered. It may have become
	 * unregistered if a machine check hit us after finishing
	 * device recognition but before the register work could be
	 * queued.
	 */
	if (!device_is_registered(&sch->dev))
		goto out_err;
	css_update_ssd_info(sch);
	/*
	 * io_subchannel_register() will also be called after device
	 * recognition has been done for a boxed device (which will already
	 * be registered). We need to reprobe since we may now have sense id
	 * information.
	 */
	if (device_is_registered(&cdev->dev)) {
		if (!cdev->drv) {
			ret = device_reprobe(&cdev->dev);
			if (ret)
				/* We can't do much here. */
				CIO_MSG_EVENT(0, "device_reprobe() returned"
					      " %d for 0.%x.%04x\n", ret,
					      cdev->private->dev_id.ssid,
					      cdev->private->dev_id.devno);
		}
		adjust_init_count = 0;
		goto out;
	}
	/*
	 * Now we know this subchannel will stay, we can throw
	 * our delayed uevent.
	 */
	if (dev_get_uevent_suppress(&sch->dev)) {
		dev_set_uevent_suppress(&sch->dev, 0);
		kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
	}
	/* make it known to the system */
	ret = ccw_device_add(cdev);
	if (ret) {
		CIO_MSG_EVENT(0, "Could not register ccw dev 0.%x.%04x: %d\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno, ret);
		spin_lock_irqsave(sch->lock, flags);
		sch_set_cdev(sch, NULL);
		spin_unlock_irqrestore(sch->lock, flags);
		/* Release initial device reference. */
		put_device(&cdev->dev);
		goto out_err;
	}
out:
	cdev->private->flags.recog_done = 1;
	wake_up(&cdev->private->wait_q);
out_err:
	if (adjust_init_count && atomic_dec_and_test(&ccw_device_init_count))
		wake_up(&ccw_device_init_wq);
}

static void ccw_device_call_sch_unregister(struct ccw_device *cdev)
{
	struct subchannel *sch;

	/* Get subchannel reference for local processing. */
	if (!get_device(cdev->dev.parent))
		return;
	sch = to_subchannel(cdev->dev.parent);
	css_sch_device_unregister(sch);
	/* Release subchannel reference for local processing. */
	put_device(&sch->dev);
}

/*
 * subchannel recognition done. Called from the state machine.
 */
void
io_subchannel_recog_done(struct ccw_device *cdev)
{
	if (css_init_done == 0) {
		cdev->private->flags.recog_done = 1;
		return;
	}
	switch (cdev->private->state) {
	case DEV_STATE_BOXED:
		/* Device did not respond in time. */
	case DEV_STATE_NOT_OPER:
		cdev->private->flags.recog_done = 1;
		/* Remove device found not operational. */
		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
		if (atomic_dec_and_test(&ccw_device_init_count))
			wake_up(&ccw_device_init_wq);
		break;
	case DEV_STATE_OFFLINE:
		/*
		 * We can't register the device in interrupt context so
		 * we schedule a work item.
		 */
		ccw_device_sched_todo(cdev, CDEV_TODO_REGISTER);
		break;
	}
}

static void io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
{
	/* Increase counter of devices currently in recognition. */
	atomic_inc(&ccw_device_init_count);

	/* Start async. device sensing. */
	spin_lock_irq(sch->lock);
	ccw_device_recognition(cdev);
	spin_unlock_irq(sch->lock);
}

static int ccw_device_move_to_sch(struct ccw_device *cdev,
				  struct subchannel *sch)
{
	struct subchannel *old_sch;
	int rc, old_enabled = 0;

	old_sch = to_subchannel(cdev->dev.parent);
	/* Obtain child reference for new parent. */
	if (!get_device(&sch->dev))
		return -ENODEV;

	if (!sch_is_pseudo_sch(old_sch)) {
		spin_lock_irq(old_sch->lock);
		old_enabled = old_sch->schib.pmcw.ena;
		rc = 0;
		if (old_enabled)
			rc = cio_disable_subchannel(old_sch);
		spin_unlock_irq(old_sch->lock);
		if (rc == -EBUSY) {
			/* Release child reference for new parent. */
			put_device(&sch->dev);
			return rc;
		}
	}

	mutex_lock(&sch->reg_mutex);
	rc = device_move(&cdev->dev, &sch->dev, DPM_ORDER_PARENT_BEFORE_DEV);
	mutex_unlock(&sch->reg_mutex);
	if (rc) {
		CIO_MSG_EVENT(0, "device_move(0.%x.%04x,0.%x.%04x)=%d\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno, sch->schid.ssid,
			      sch->schib.pmcw.dev, rc);
		if (old_enabled) {
			/* Try to reenable the old subchannel. */
			spin_lock_irq(old_sch->lock);
			cio_enable_subchannel(old_sch, (u32)(addr_t)old_sch);
			spin_unlock_irq(old_sch->lock);
		}
		/* Release child reference for new parent. */
		put_device(&sch->dev);
		return rc;
	}
	/* Clean up old subchannel. */
	if (!sch_is_pseudo_sch(old_sch)) {
		spin_lock_irq(old_sch->lock);
		sch_set_cdev(old_sch, NULL);
		spin_unlock_irq(old_sch->lock);
		css_schedule_eval(old_sch->schid);
	}
	/* Release child reference for old parent. */
	put_device(&old_sch->dev);
	/* Initialize new subchannel. */
	spin_lock_irq(sch->lock);
	cdev->ccwlock = sch->lock;
	if (!sch_is_pseudo_sch(sch))
		sch_set_cdev(sch, cdev);
	spin_unlock_irq(sch->lock);
	if (!sch_is_pseudo_sch(sch))
		css_update_ssd_info(sch);
	return 0;
}

static int ccw_device_move_to_orph(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct channel_subsystem *css = to_css(sch->dev.parent);

	return ccw_device_move_to_sch(cdev, css->pseudo_subchannel);
}

static void io_subchannel_irq(struct subchannel *sch)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);

	CIO_TRACE_EVENT(6, "IRQ");
	CIO_TRACE_EVENT(6, dev_name(&sch->dev));
	if (cdev)
		dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
	else
		inc_irq_stat(IRQIO_CIO);
}

void io_subchannel_init_config(struct subchannel *sch)
{
	memset(&sch->config, 0, sizeof(sch->config));
	sch->config.csense = 1;
}

static void io_subchannel_init_fields(struct subchannel *sch)
{
	if (cio_is_console(sch->schid))
		sch->opm = 0xff;
	else
		sch->opm = chp_get_sch_opm(sch);
	sch->lpm = sch->schib.pmcw.pam & sch->opm;
	sch->isc = cio_is_console(sch->schid) ? CONSOLE_ISC : IO_SCH_ISC;

	CIO_MSG_EVENT(6, "Detected device %04x on subchannel 0.%x.%04X"
		      " - PIM = %02X, PAM = %02X, POM = %02X\n",
		      sch->schib.pmcw.dev, sch->schid.ssid,
		      sch->schid.sch_no, sch->schib.pmcw.pim,
		      sch->schib.pmcw.pam, sch->schib.pmcw.pom);

	io_subchannel_init_config(sch);
}

/*
 * Note: We always return 0 so that we bind to the device even on error.
 * This is needed so that our remove function is called on unregister.
 */
static int io_subchannel_probe(struct subchannel *sch)
{
	struct io_subchannel_private *io_priv;
	struct ccw_device *cdev;
	int rc;

	if (cio_is_console(sch->schid)) {
		rc = sysfs_create_group(&sch->dev.kobj,
					&io_subchannel_attr_group);
		if (rc)
			CIO_MSG_EVENT(0, "Failed to create io subchannel "
				      "attributes for subchannel "
				      "0.%x.%04x (rc=%d)\n",
				      sch->schid.ssid, sch->schid.sch_no, rc);
		/*
		 * The console subchannel already has an associated ccw_device.
		 * Throw the delayed uevent for the subchannel, register
		 * the ccw_device and exit.
		 */
		if (dev_get_uevent_suppress(&sch->dev)) {
			/* should always be the case for the console */
			dev_set_uevent_suppress(&sch->dev, 0);
			kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
		}
		cdev = sch_get_cdev(sch);
		rc = ccw_device_add(cdev);
		if (rc) {
			/* Release online reference. */
			put_device(&cdev->dev);
			goto out_schedule;
		}
		if (atomic_dec_and_test(&ccw_device_init_count))
			wake_up(&ccw_device_init_wq);
		return 0;
	}
	io_subchannel_init_fields(sch);
	rc = cio_commit_config(sch);
	if (rc)
		goto out_schedule;
	rc = sysfs_create_group(&sch->dev.kobj,
				&io_subchannel_attr_group);
	if (rc)
		goto out_schedule;
	/* Allocate I/O subchannel private data. */
	io_priv = kzalloc(sizeof(*io_priv), GFP_KERNEL | GFP_DMA);
	if (!io_priv)
		goto out_schedule;

	io_priv->dma_area = dma_alloc_coherent(&sch->dev,
				sizeof(*io_priv->dma_area),
				&io_priv->dma_area_dma, GFP_KERNEL);
	if (!io_priv->dma_area) {
		kfree(io_priv);
		goto out_schedule;
	}

	set_io_private(sch, io_priv);
	css_schedule_eval(sch->schid);
	return 0;

out_schedule:
	spin_lock_irq(sch->lock);
	css_sched_sch_todo(sch, SCH_TODO_UNREG);
	spin_unlock_irq(sch->lock);
	return 0;
}

static int io_subchannel_remove(struct subchannel *sch)
{
	struct io_subchannel_private *io_priv = to_io_private(sch);
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (!cdev)
		goto out_free;

	ccw_device_unregister(cdev);
	spin_lock_irq(sch->lock);
	sch_set_cdev(sch, NULL);
	set_io_private(sch, NULL);
	spin_unlock_irq(sch->lock);
out_free:
	dma_free_coherent(&sch->dev, sizeof(*io_priv->dma_area),
			  io_priv->dma_area, io_priv->dma_area_dma);
	kfree(io_priv);
	sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
	return 0;
}

static void io_subchannel_verify(struct subchannel *sch)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (cdev)
		dev_fsm_event(cdev, DEV_EVENT_VERIFY);
}

static void io_subchannel_terminate_path(struct subchannel *sch, u8 mask)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (!cdev)
		return;
	if (cio_update_schib(sch))
		goto err;
	/* Check for I/O on path. */
	if (scsw_actl(&sch->schib.scsw) == 0 || sch->schib.pmcw.lpum != mask)
		goto out;
	if (cdev->private->state == DEV_STATE_ONLINE) {
		ccw_device_kill_io(cdev);
		goto out;
	}
	if (cio_clear(sch))
		goto err;
out:
	/* Trigger path verification. */
	dev_fsm_event(cdev, DEV_EVENT_VERIFY);
	return;

err:
	dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
}

static int io_subchannel_chp_event(struct subchannel *sch,
				   struct chp_link *link, int event)
{
	struct ccw_device *cdev = sch_get_cdev(sch);
	int mask;

	mask = chp_ssd_get_mask(&sch->ssd_info, link);
	if (!mask)
		return 0;
	switch (event) {
	case CHP_VARY_OFF:
		sch->opm &= ~mask;
		sch->lpm &= ~mask;
		if (cdev)
			cdev->private->path_gone_mask |= mask;
		io_subchannel_terminate_path(sch, mask);
		break;
	case CHP_VARY_ON:
		sch->opm |= mask;
		sch->lpm |= mask;
		if (cdev)
			cdev->private->path_new_mask |= mask;
		io_subchannel_verify(sch);
		break;
	case CHP_OFFLINE:
		if (cio_update_schib(sch))
			return -ENODEV;
		if (cdev)
			cdev->private->path_gone_mask |= mask;
		io_subchannel_terminate_path(sch, mask);
		break;
	case CHP_ONLINE:
		if (cio_update_schib(sch))
			return -ENODEV;
		sch->lpm |= mask & sch->opm;
		if (cdev)
			cdev->private->path_new_mask |= mask;
		io_subchannel_verify(sch);
		break;
	}
	return 0;
}

static void io_subchannel_quiesce(struct subchannel *sch)
{
	struct ccw_device *cdev;
	int ret;

	spin_lock_irq(sch->lock);
	cdev = sch_get_cdev(sch);
	if (cio_is_console(sch->schid))
		goto out_unlock;
	if (!sch->schib.pmcw.ena)
		goto out_unlock;
	ret = cio_disable_subchannel(sch);
	if (ret != -EBUSY)
		goto out_unlock;
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm, ERR_PTR(-EIO));
	while (ret == -EBUSY) {
		cdev->private->state = DEV_STATE_QUIESCE;
		cdev->private->iretry = 255;
		ret = ccw_device_cancel_halt_clear(cdev);
		if (ret == -EBUSY) {
			ccw_device_set_timeout(cdev, HZ/10);
			spin_unlock_irq(sch->lock);
			wait_event(cdev->private->wait_q,
				   cdev->private->state != DEV_STATE_QUIESCE);
			spin_lock_irq(sch->lock);
		}
		ret = cio_disable_subchannel(sch);
	}
out_unlock:
	spin_unlock_irq(sch->lock);
}

static void io_subchannel_shutdown(struct subchannel *sch)
{
	io_subchannel_quiesce(sch);
}

static int device_is_disconnected(struct ccw_device *cdev)
{
	if (!cdev)
		return 0;
	return (cdev->private->state == DEV_STATE_DISCONNECTED ||
		cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID);
}

static int recovery_check(struct device *dev, void *data)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct subchannel *sch;
	int *redo = data;

	spin_lock_irq(cdev->ccwlock);
	switch (cdev->private->state) {
	case DEV_STATE_ONLINE:
		sch = to_subchannel(cdev->dev.parent);
		if ((sch->schib.pmcw.pam & sch->opm) == sch->vpm)
			break;
		fallthrough;
	case DEV_STATE_DISCONNECTED:
		CIO_MSG_EVENT(3, "recovery: trigger 0.%x.%04x\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno);
		dev_fsm_event(cdev, DEV_EVENT_VERIFY);
		*redo = 1;
		break;
	case DEV_STATE_DISCONNECTED_SENSE_ID:
		*redo = 1;
		break;
	}
	spin_unlock_irq(cdev->ccwlock);

	return 0;
}

static void recovery_work_func(struct work_struct *unused)
{
	int redo = 0;

	bus_for_each_dev(&ccw_bus_type, NULL, &redo, recovery_check);
	if (redo) {
		spin_lock_irq(&recovery_lock);
		if (!timer_pending(&recovery_timer)) {
			if (recovery_phase < ARRAY_SIZE(recovery_delay) - 1)
				recovery_phase++;
			mod_timer(&recovery_timer, jiffies +
				  recovery_delay[recovery_phase] * HZ);
		}
		spin_unlock_irq(&recovery_lock);
	} else
		CIO_MSG_EVENT(3, "recovery: end\n");
}

static DECLARE_WORK(recovery_work, recovery_work_func);

static void recovery_func(struct timer_list *unused)
{
	/*
	 * We can't do our recovery in softirq context and it's not
	 * performance critical, so we schedule it.
	 */
	schedule_work(&recovery_work);
}

void ccw_device_schedule_recovery(void)
{
	unsigned long flags;

	CIO_MSG_EVENT(3, "recovery: schedule\n");
	spin_lock_irqsave(&recovery_lock, flags);
	if (!timer_pending(&recovery_timer) || (recovery_phase != 0)) {
		recovery_phase = 0;
		mod_timer(&recovery_timer, jiffies + recovery_delay[0] * HZ);
	}
	spin_unlock_irqrestore(&recovery_lock, flags);
}

static int purge_fn(struct device *dev, void *data)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_dev_id *id = &cdev->private->dev_id;

	spin_lock_irq(cdev->ccwlock);
	if (is_blacklisted(id->ssid, id->devno) &&
	    (cdev->private->state == DEV_STATE_OFFLINE) &&
	    (atomic_cmpxchg(&cdev->private->onoff, 0, 1) == 0)) {
		CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x\n", id->ssid,
			      id->devno);
		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
		atomic_set(&cdev->private->onoff, 0);
	}
	spin_unlock_irq(cdev->ccwlock);
	/* Abort loop in case of pending signal. */
	if (signal_pending(current))
		return -EINTR;

	return 0;
}

/**
 * ccw_purge_blacklisted - purge unused, blacklisted devices
 *
 * Unregister all ccw devices that are offline and on the blacklist.
 */
int ccw_purge_blacklisted(void)
{
	CIO_MSG_EVENT(2, "ccw: purging blacklisted devices\n");
	bus_for_each_dev(&ccw_bus_type, NULL, NULL, purge_fn);
	return 0;
}

void ccw_device_set_disconnected(struct ccw_device *cdev)
{
	if (!cdev)
		return;
	ccw_device_set_timeout(cdev, 0);
	cdev->private->flags.fake_irb = 0;
	cdev->private->state = DEV_STATE_DISCONNECTED;
	if (cdev->online)
		ccw_device_schedule_recovery();
}

void ccw_device_set_notoper(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	CIO_TRACE_EVENT(2, "notoper");
	CIO_TRACE_EVENT(2, dev_name(&sch->dev));
	ccw_device_set_timeout(cdev, 0);
	cio_disable_subchannel(sch);
	cdev->private->state = DEV_STATE_NOT_OPER;
}

enum io_sch_action {
	IO_SCH_UNREG,
	IO_SCH_ORPH_UNREG,
	IO_SCH_ATTACH,
	IO_SCH_UNREG_ATTACH,
	IO_SCH_ORPH_ATTACH,
	IO_SCH_REPROBE,
	IO_SCH_VERIFY,
	IO_SCH_DISC,
	IO_SCH_NOP,
};

static enum io_sch_action sch_get_action(struct subchannel *sch)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (cio_update_schib(sch)) {
		/* Not operational. */
		if (!cdev)
			return IO_SCH_UNREG;
		if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
			return IO_SCH_UNREG;
		return IO_SCH_ORPH_UNREG;
	}
	/* Operational. */
	if (!cdev)
		return IO_SCH_ATTACH;
	if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {
		if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
			return IO_SCH_UNREG_ATTACH;
		return IO_SCH_ORPH_ATTACH;
	}
	if ((sch->schib.pmcw.pam & sch->opm) == 0) {
		if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK)
			return IO_SCH_UNREG;
		return IO_SCH_DISC;
	}
	if (device_is_disconnected(cdev))
		return IO_SCH_REPROBE;
	if (cdev->online && !cdev->private->flags.resuming)
		return IO_SCH_VERIFY;
	if (cdev->private->state == DEV_STATE_NOT_OPER)
		return IO_SCH_UNREG_ATTACH;
	return IO_SCH_NOP;
}

/**
 * io_subchannel_sch_event - process subchannel event
 * @sch: subchannel
 * @process: non-zero if function is called in process context
 *
 * An unspecified event occurred for this subchannel. Adjust data according
 * to the current operational state of the subchannel and device. Return
 * zero when the event has been handled sufficiently or -EAGAIN when this
 * function should be called again in process context.
 */
static int io_subchannel_sch_event(struct subchannel *sch, int process)
{
	unsigned long flags;
	struct ccw_device *cdev;
	struct ccw_dev_id dev_id;
	enum io_sch_action action;
	int rc = -EAGAIN;

	spin_lock_irqsave(sch->lock, flags);
	if (!device_is_registered(&sch->dev))
		goto out_unlock;
	if (work_pending(&sch->todo_work))
		goto out_unlock;
	cdev = sch_get_cdev(sch);
	if (cdev && work_pending(&cdev->private->todo_work))
		goto out_unlock;
	action = sch_get_action(sch);
	CIO_MSG_EVENT(2, "event: sch 0.%x.%04x, process=%d, action=%d\n",
		      sch->schid.ssid, sch->schid.sch_no, process,
		      action);
	/* Perform immediate actions while holding the lock. */
	switch (action) {
	case IO_SCH_REPROBE:
		/* Trigger device recognition. */
		ccw_device_trigger_reprobe(cdev);
		rc = 0;
		goto out_unlock;
	case IO_SCH_VERIFY:
		/* Trigger path verification. */
		io_subchannel_verify(sch);
		rc = 0;
		goto out_unlock;
	case IO_SCH_DISC:
		ccw_device_set_disconnected(cdev);
		rc = 0;
		goto out_unlock;
	case IO_SCH_ORPH_UNREG:
	case IO_SCH_ORPH_ATTACH:
		ccw_device_set_disconnected(cdev);
		break;
	case IO_SCH_UNREG_ATTACH:
	case IO_SCH_UNREG:
		if (!cdev)
			break;
		if (cdev->private->state == DEV_STATE_SENSE_ID) {
			/*
			 * Note: delayed work triggered by this event
			 * and repeated calls to sch_event are synchronized
			 * by the above check for work_pending(cdev).
			 */
			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
		} else
			ccw_device_set_notoper(cdev);
		break;
	case IO_SCH_NOP:
		rc = 0;
		goto out_unlock;
	default:
		break;
	}
	spin_unlock_irqrestore(sch->lock, flags);
	/* All other actions require process context. */
	if (!process)
		goto out;
	/* Handle attached ccw device. */
	switch (action) {
	case IO_SCH_ORPH_UNREG:
	case IO_SCH_ORPH_ATTACH:
		/* Move ccw device to orphanage. */
		rc = ccw_device_move_to_orph(cdev);
		if (rc)
			goto out;
		break;
	case IO_SCH_UNREG_ATTACH:
		spin_lock_irqsave(sch->lock, flags);
		if (cdev->private->flags.resuming) {
			/* Device will be handled later. */
			rc = 0;
			goto out_unlock;
		}
		sch_set_cdev(sch, NULL);
		spin_unlock_irqrestore(sch->lock, flags);
		/* Unregister ccw device. */
		ccw_device_unregister(cdev);
		break;
	default:
		break;
	}
	/* Handle subchannel. */
	switch (action) {
	case IO_SCH_ORPH_UNREG:
	case IO_SCH_UNREG:
		if (!cdev || !cdev->private->flags.resuming)
			css_sch_device_unregister(sch);
		break;
	case IO_SCH_ORPH_ATTACH:
	case IO_SCH_UNREG_ATTACH:
	case IO_SCH_ATTACH:
		dev_id.ssid = sch->schid.ssid;
		dev_id.devno = sch->schib.pmcw.dev;
		cdev = get_ccwdev_by_dev_id(&dev_id);
		if (!cdev) {
			sch_create_and_recog_new_device(sch);
			break;
		}
		rc = ccw_device_move_to_sch(cdev, sch);
		if (rc) {
			/* Release reference from get_ccwdev_by_dev_id() */
			put_device(&cdev->dev);
			goto out;
		}
		spin_lock_irqsave(sch->lock, flags);
		ccw_device_trigger_reprobe(cdev);
		spin_unlock_irqrestore(sch->lock, flags);
		/* Release reference from get_ccwdev_by_dev_id() */
		put_device(&cdev->dev);
		break;
	default:
		break;
	}
	return 0;

out_unlock:
	spin_unlock_irqrestore(sch->lock, flags);
out:
	return rc;
}
static void ccw_device_set_int_class(struct ccw_device *cdev)
{
	struct ccw_driver *cdrv = cdev->drv;

	/* Note: we interpret class 0 in this context as an uninitialized
	 * field since it translates to a non-I/O interrupt class. */
	if (cdrv->int_class != 0)
		cdev->private->int_class = cdrv->int_class;
	else
		cdev->private->int_class = IRQIO_CIO;
}

#ifdef CONFIG_CCW_CONSOLE
int __init ccw_device_enable_console(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	int rc;

	if (!cdev->drv || !cdev->handler)
		return -EINVAL;

	io_subchannel_init_fields(sch);
	rc = cio_commit_config(sch);
	if (rc)
		return rc;
	sch->driver = &io_subchannel_driver;
	io_subchannel_recog(cdev, sch);
	/* Now wait for the async. recognition to come to an end. */
	spin_lock_irq(cdev->ccwlock);
	while (!dev_fsm_final_state(cdev))
		ccw_device_wait_idle(cdev);

	/* Hold on to an extra reference while device is online. */
	get_device(&cdev->dev);
	rc = ccw_device_online(cdev);
	if (rc)
		goto out_unlock;

	while (!dev_fsm_final_state(cdev))
		ccw_device_wait_idle(cdev);

	if (cdev->private->state == DEV_STATE_ONLINE)
		cdev->online = 1;
	else
		rc = -EIO;
out_unlock:
	spin_unlock_irq(cdev->ccwlock);
	if (rc) /* Give up online reference since onlining failed. */
		put_device(&cdev->dev);
	return rc;
}

struct ccw_device * __init ccw_device_create_console(struct ccw_driver *drv)
{
	struct io_subchannel_private *io_priv;
	struct ccw_device *cdev;
	struct subchannel *sch;

	sch = cio_probe_console();
	if (IS_ERR(sch))
		return ERR_CAST(sch);

	io_priv = kzalloc(sizeof(*io_priv), GFP_KERNEL | GFP_DMA);
	if (!io_priv)
		goto err_priv;
	io_priv->dma_area = dma_alloc_coherent(&sch->dev,
					       sizeof(*io_priv->dma_area),
					       &io_priv->dma_area_dma,
					       GFP_KERNEL);
	if (!io_priv->dma_area)
		goto err_dma_area;
	set_io_private(sch, io_priv);
	cdev = io_subchannel_create_ccwdev(sch);
	if (IS_ERR(cdev)) {
		dma_free_coherent(&sch->dev, sizeof(*io_priv->dma_area),
				  io_priv->dma_area, io_priv->dma_area_dma);
		set_io_private(sch, NULL);
		put_device(&sch->dev);
		kfree(io_priv);
		return cdev;
	}
	cdev->drv = drv;
	ccw_device_set_int_class(cdev);
	return cdev;

err_dma_area:
	kfree(io_priv);
err_priv:
	put_device(&sch->dev);
	return ERR_PTR(-ENOMEM);
}
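
/*
 * Illustrative sketch (not part of this driver): an early console driver
 * would typically pair ccw_device_create_console() with
 * ccw_device_enable_console() roughly as follows. The names
 * my_console_driver and my_console_irq are placeholders for the console
 * driver's own ccw_driver and interrupt handler.
 *
 *	static int __init my_console_setup(void)
 *	{
 *		struct ccw_device *cdev;
 *		int rc;
 *
 *		cdev = ccw_device_create_console(&my_console_driver);
 *		if (IS_ERR(cdev))
 *			return PTR_ERR(cdev);
 *		cdev->handler = my_console_irq;
 *		rc = ccw_device_enable_console(cdev);
 *		if (rc)
 *			ccw_device_destroy_console(cdev);
 *		return rc;
 *	}
 */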
void __init ccw_device_destroy_console(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct io_subchannel_private *io_priv = to_io_private(sch);

	set_io_private(sch, NULL);
	put_device(&sch->dev);
	put_device(&cdev->dev);
	dma_free_coherent(&sch->dev, sizeof(*io_priv->dma_area),
			  io_priv->dma_area, io_priv->dma_area_dma);
	kfree(io_priv);
}

/**
 * ccw_device_wait_idle() - busy wait for device to become idle
 * @cdev: ccw device
 *
 * Poll until activity control is zero, that is, no function or data
 * transfer is pending/active.
 * Called with device lock being held.
 */
void ccw_device_wait_idle(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	while (1) {
		cio_tsch(sch);
		if (sch->schib.scsw.cmd.actl == 0)
			break;
		udelay_simple(100);
	}
}

static int ccw_device_pm_restore(struct device *dev);

int ccw_device_force_console(struct ccw_device *cdev)
{
	return ccw_device_pm_restore(&cdev->dev);
}
EXPORT_SYMBOL_GPL(ccw_device_force_console);
#endif

/**
 * get_ccwdev_by_busid() - obtain device from a bus id
 * @cdrv: driver the device is owned by
 * @bus_id: bus id of the device to be searched
 *
 * This function searches all devices owned by @cdrv for a device with a bus
 * id matching @bus_id.
 * Returns:
 *  If a match is found, the reference count of the found device is increased
 *  and the device is returned; else %NULL is returned.
 */
struct ccw_device *get_ccwdev_by_busid(struct ccw_driver *cdrv,
				       const char *bus_id)
{
	struct device *dev;

	dev = driver_find_device_by_name(&cdrv->driver, bus_id);

	return dev ? to_ccwdev(dev) : NULL;
}
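
/*
 * Illustrative sketch: a driver could look up one of its own devices by
 * bus id and must drop the acquired reference when done. The bus id
 * "0.0.4711" and my_driver are placeholders.
 *
 *	struct ccw_device *cdev;
 *
 *	cdev = get_ccwdev_by_busid(&my_driver, "0.0.4711");
 *	if (cdev) {
 *		use_device(cdev);
 *		put_device(&cdev->dev);
 *	}
 */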
/************************** device driver handling ************************/

/* This is the implementation of the ccw_driver class. The probe, remove
 * and release methods are initially very similar to the device_driver
 * implementations, with the difference that they have ccw_device
 * arguments.
 *
 * A ccw driver also contains the information that is needed for
 * device matching.
 */
static int
ccw_device_probe(struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_driver *cdrv = to_ccwdrv(dev->driver);
	int ret;

	cdev->drv = cdrv; /* to let the driver call _set_online */
	ccw_device_set_int_class(cdev);
	ret = cdrv->probe ? cdrv->probe(cdev) : -ENODEV;
	if (ret) {
		cdev->drv = NULL;
		cdev->private->int_class = IRQIO_CIO;
		return ret;
	}

	return 0;
}
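
/*
 * Illustrative sketch of the matching data a ccw driver supplies: the bus
 * match callback above compares a device's sense data against the driver's
 * id table before ccw_device_probe() invokes the driver's probe callback.
 * The control unit/device type numbers and the my_* names are placeholders.
 *
 *	static struct ccw_device_id my_ids[] = {
 *		{ CCW_DEVICE_DEVTYPE(0x3990, 0, 0x3390, 0), .driver_info = 0 },
 *		{ },
 *	};
 *	MODULE_DEVICE_TABLE(ccw, my_ids);
 *
 *	static int my_probe(struct ccw_device *cdev)
 *	{
 *		dev_set_drvdata(&cdev->dev, NULL);
 *		return 0;
 *	}
 */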
static int ccw_device_remove(struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_driver *cdrv = cdev->drv;
	struct subchannel *sch;
	int ret;

	if (cdrv->remove)
		cdrv->remove(cdev);

	spin_lock_irq(cdev->ccwlock);
	if (cdev->online) {
		cdev->online = 0;
		ret = ccw_device_offline(cdev);
		spin_unlock_irq(cdev->ccwlock);
		if (ret == 0)
			wait_event(cdev->private->wait_q,
				   dev_fsm_final_state(cdev));
		else
			CIO_MSG_EVENT(0, "ccw_device_offline returned %d, "
				      "device 0.%x.%04x\n",
				      ret, cdev->private->dev_id.ssid,
				      cdev->private->dev_id.devno);
		/* Give up reference obtained in ccw_device_set_online(). */
		put_device(&cdev->dev);
		spin_lock_irq(cdev->ccwlock);
	}
	ccw_device_set_timeout(cdev, 0);
	cdev->drv = NULL;
	cdev->private->int_class = IRQIO_CIO;
	sch = to_subchannel(cdev->dev.parent);
	spin_unlock_irq(cdev->ccwlock);
	io_subchannel_quiesce(sch);
	__disable_cmf(cdev);

	return 0;
}

static void ccw_device_shutdown(struct device *dev)
{
	struct ccw_device *cdev;

	cdev = to_ccwdev(dev);
	if (cdev->drv && cdev->drv->shutdown)
		cdev->drv->shutdown(cdev);
	__disable_cmf(cdev);
}

static int ccw_device_pm_prepare(struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);

	if (work_pending(&cdev->private->todo_work))
		return -EAGAIN;
	/* Fail while device is being set online/offline. */
	if (atomic_read(&cdev->private->onoff))
		return -EAGAIN;

	if (cdev->online && cdev->drv && cdev->drv->prepare)
		return cdev->drv->prepare(cdev);

	return 0;
}

static void ccw_device_pm_complete(struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);

	if (cdev->online && cdev->drv && cdev->drv->complete)
		cdev->drv->complete(cdev);
}

static int ccw_device_pm_freeze(struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	int ret, cm_enabled;

	/* Fail suspend while device is in transitional state. */
	if (!dev_fsm_final_state(cdev))
		return -EAGAIN;
	if (!cdev->online)
		return 0;
	if (cdev->drv && cdev->drv->freeze) {
		ret = cdev->drv->freeze(cdev);
		if (ret)
			return ret;
	}

	spin_lock_irq(sch->lock);
	cm_enabled = cdev->private->cmb != NULL;
	spin_unlock_irq(sch->lock);
	if (cm_enabled) {
		/* Don't have the css write on memory. */
		ret = ccw_set_cmf(cdev, 0);
		if (ret)
			return ret;
	}
	/* From here on, disallow device driver I/O. */
	spin_lock_irq(sch->lock);
	ret = cio_disable_subchannel(sch);
	spin_unlock_irq(sch->lock);

	return ret;
}

static int ccw_device_pm_thaw(struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	int ret, cm_enabled;

	if (!cdev->online)
		return 0;

	spin_lock_irq(sch->lock);
	/* Allow device driver I/O again. */
	ret = cio_enable_subchannel(sch, (u32)(addr_t)sch);
	cm_enabled = cdev->private->cmb != NULL;
	spin_unlock_irq(sch->lock);
	if (ret)
		return ret;

	if (cm_enabled) {
		ret = ccw_set_cmf(cdev, 1);
		if (ret)
			return ret;
	}

	if (cdev->drv && cdev->drv->thaw)
		ret = cdev->drv->thaw(cdev);

	return ret;
}

static void __ccw_device_pm_restore(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	spin_lock_irq(sch->lock);
	if (cio_is_console(sch->schid)) {
		cio_enable_subchannel(sch, (u32)(addr_t)sch);
		goto out_unlock;
	}
	/*
	 * While we were sleeping, devices may have gone or become
	 * available again. Kick re-detection.
	 */
	cdev->private->flags.resuming = 1;
	cdev->private->path_new_mask = LPM_ANYPATH;
	css_sched_sch_todo(sch, SCH_TODO_EVAL);
	spin_unlock_irq(sch->lock);
	css_wait_for_slow_path();

	/* cdev may have been moved to a different subchannel. */
	sch = to_subchannel(cdev->dev.parent);
	spin_lock_irq(sch->lock);
	if (cdev->private->state != DEV_STATE_ONLINE &&
	    cdev->private->state != DEV_STATE_OFFLINE)
		goto out_unlock;

	ccw_device_recognition(cdev);
	spin_unlock_irq(sch->lock);
	wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev) ||
		   cdev->private->state == DEV_STATE_DISCONNECTED);
	spin_lock_irq(sch->lock);

out_unlock:
	cdev->private->flags.resuming = 0;
	spin_unlock_irq(sch->lock);
}

static int resume_handle_boxed(struct ccw_device *cdev)
{
	cdev->private->state = DEV_STATE_BOXED;
	if (ccw_device_notify(cdev, CIO_BOXED) == NOTIFY_OK)
		return 0;
	ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
	return -ENODEV;
}

static int resume_handle_disc(struct ccw_device *cdev)
{
	cdev->private->state = DEV_STATE_DISCONNECTED;
	if (ccw_device_notify(cdev, CIO_GONE) == NOTIFY_OK)
		return 0;
	ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
	return -ENODEV;
}

static int ccw_device_pm_restore(struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct subchannel *sch;
	int ret = 0;

	__ccw_device_pm_restore(cdev);
	sch = to_subchannel(cdev->dev.parent);
	spin_lock_irq(sch->lock);
	if (cio_is_console(sch->schid))
		goto out_restore;

	/* check recognition results */
	switch (cdev->private->state) {
	case DEV_STATE_OFFLINE:
	case DEV_STATE_ONLINE:
		cdev->private->flags.donotify = 0;
		break;
	case DEV_STATE_BOXED:
		ret = resume_handle_boxed(cdev);
		if (ret)
			goto out_unlock;
		goto out_restore;
	default:
		ret = resume_handle_disc(cdev);
		if (ret)
			goto out_unlock;
		goto out_restore;
	}
	/* check if the device type has changed */
	if (!ccw_device_test_sense_data(cdev)) {
		ccw_device_update_sense_data(cdev);
		ccw_device_sched_todo(cdev, CDEV_TODO_REBIND);
		ret = -ENODEV;
		goto out_unlock;
	}
	if (!cdev->online)
		goto out_unlock;

	if (ccw_device_online(cdev)) {
		ret = resume_handle_disc(cdev);
		if (ret)
			goto out_unlock;
		goto out_restore;
	}
	spin_unlock_irq(sch->lock);
	wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
	spin_lock_irq(sch->lock);

	if (ccw_device_notify(cdev, CIO_OPER) == NOTIFY_BAD) {
		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
		ret = -ENODEV;
		goto out_unlock;
	}

	/* reenable cmf, if needed */
	if (cdev->private->cmb) {
		spin_unlock_irq(sch->lock);
		ret = ccw_set_cmf(cdev, 1);
		spin_lock_irq(sch->lock);
		if (ret) {
			CIO_MSG_EVENT(2, "resume: cdev 0.%x.%04x: cmf failed "
				      "(rc=%d)\n", cdev->private->dev_id.ssid,
				      cdev->private->dev_id.devno, ret);
			ret = 0;
		}
	}

out_restore:
	spin_unlock_irq(sch->lock);
	if (cdev->online && cdev->drv && cdev->drv->restore)
		ret = cdev->drv->restore(cdev);
	return ret;

out_unlock:
	spin_unlock_irq(sch->lock);
	return ret;
}

static const struct dev_pm_ops ccw_pm_ops = {
	.prepare = ccw_device_pm_prepare,
	.complete = ccw_device_pm_complete,
	.freeze = ccw_device_pm_freeze,
	.thaw = ccw_device_pm_thaw,
	.restore = ccw_device_pm_restore,
};

static struct bus_type ccw_bus_type = {
	.name   = "ccw",
	.match  = ccw_bus_match,
	.uevent = ccw_uevent,
	.probe  = ccw_device_probe,
	.remove = ccw_device_remove,
	.shutdown = ccw_device_shutdown,
	.pm = &ccw_pm_ops,
};

/**
 * ccw_driver_register() - register a ccw driver
 * @cdriver: driver to be registered
 *
 * This function is mainly a wrapper around driver_register().
 * Returns:
 *   %0 on success and a negative error value on failure.
 */
int ccw_driver_register(struct ccw_driver *cdriver)
{
	struct device_driver *drv = &cdriver->driver;

	drv->bus = &ccw_bus_type;

	return driver_register(drv);
}

/**
 * ccw_driver_unregister() - deregister a ccw driver
 * @cdriver: driver to be deregistered
 *
 * This function is mainly a wrapper around driver_unregister().
 */
void ccw_driver_unregister(struct ccw_driver *cdriver)
{
	driver_unregister(&cdriver->driver);
}
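
/*
 * Illustrative sketch: a typical ccw driver registers itself from its
 * module init routine and unregisters on exit. The my_* names and id
 * table are placeholders, not part of this file.
 *
 *	static struct ccw_driver my_ccw_driver = {
 *		.driver = {
 *			.name  = "my_ccw",
 *			.owner = THIS_MODULE,
 *		},
 *		.ids    = my_ids,
 *		.probe  = my_probe,
 *		.remove = my_remove,
 *	};
 *
 *	static int __init my_init(void)
 *	{
 *		return ccw_driver_register(&my_ccw_driver);
 *	}
 *	module_init(my_init);
 *
 *	static void __exit my_exit(void)
 *	{
 *		ccw_driver_unregister(&my_ccw_driver);
 *	}
 *	module_exit(my_exit);
 */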
static void ccw_device_todo(struct work_struct *work)
{
	struct ccw_device_private *priv;
	struct ccw_device *cdev;
	struct subchannel *sch;
	enum cdev_todo todo;

	priv = container_of(work, struct ccw_device_private, todo_work);
	cdev = priv->cdev;
	sch = to_subchannel(cdev->dev.parent);
	/* Find out todo. */
	spin_lock_irq(cdev->ccwlock);
	todo = priv->todo;
	priv->todo = CDEV_TODO_NOTHING;
	CIO_MSG_EVENT(4, "cdev_todo: cdev=0.%x.%04x todo=%d\n",
		      priv->dev_id.ssid, priv->dev_id.devno, todo);
	spin_unlock_irq(cdev->ccwlock);
	/* Perform todo. */
	switch (todo) {
	case CDEV_TODO_ENABLE_CMF:
		cmf_reenable(cdev);
		break;
	case CDEV_TODO_REBIND:
		ccw_device_do_unbind_bind(cdev);
		break;
	case CDEV_TODO_REGISTER:
		io_subchannel_register(cdev);
		break;
	case CDEV_TODO_UNREG_EVAL:
		if (!sch_is_pseudo_sch(sch))
			css_schedule_eval(sch->schid);
		fallthrough;
	case CDEV_TODO_UNREG:
		if (sch_is_pseudo_sch(sch))
			ccw_device_unregister(cdev);
		else
			ccw_device_call_sch_unregister(cdev);
		break;
	default:
		break;
	}
	/* Release workqueue ref. */
	put_device(&cdev->dev);
}

/**
 * ccw_device_sched_todo - schedule ccw device operation
 * @cdev: ccw device
 * @todo: todo
 *
 * Schedule the operation identified by @todo to be performed on the slow path
 * workqueue. Do nothing if another operation with higher priority is already
 * scheduled. Needs to be called with ccwdev lock held.
 */
void ccw_device_sched_todo(struct ccw_device *cdev, enum cdev_todo todo)
{
	CIO_MSG_EVENT(4, "cdev_todo: sched cdev=0.%x.%04x todo=%d\n",
		      cdev->private->dev_id.ssid, cdev->private->dev_id.devno,
		      todo);
	if (cdev->private->todo >= todo)
		return;
	cdev->private->todo = todo;
	/* Get workqueue ref. */
	if (!get_device(&cdev->dev))
		return;
	if (!queue_work(cio_work_q, &cdev->private->todo_work)) {
		/* Already queued, release workqueue ref. */
		put_device(&cdev->dev);
	}
}
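
/*
 * Illustrative sketch: callers hold the ccwdev lock while scheduling a
 * todo, for example to request unregistration of a device:
 *
 *	spin_lock_irqsave(cdev->ccwlock, flags);
 *	ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
 *	spin_unlock_irqrestore(cdev->ccwlock, flags);
 */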
/**
 * ccw_device_siosl() - initiate logging
 * @cdev: ccw device
 *
 * This function is used to invoke model-dependent logging within the channel
 * subsystem.
 */
int ccw_device_siosl(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	return chsc_siosl(sch->schid);
}
EXPORT_SYMBOL_GPL(ccw_device_siosl);
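
/*
 * Illustrative sketch: a device driver may trigger model-dependent logging
 * from an error path and only report failure of the logging request itself:
 *
 *	if (ccw_device_siosl(cdev))
 *		dev_warn(&cdev->dev, "could not initiate logging\n");
 */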

EXPORT_SYMBOL(ccw_device_set_online);
EXPORT_SYMBOL(ccw_device_set_offline);
EXPORT_SYMBOL(ccw_driver_register);
EXPORT_SYMBOL(ccw_driver_unregister);
EXPORT_SYMBOL(get_ccwdev_by_busid);