2017-11-14 18:38:02 +01:00
// SPDX-License-Identifier: GPL-2.0
2005-04-16 15:20:36 -07:00
/*
* finite state machine for device handling
*
2012-07-20 11:15:04 +02:00
* Copyright IBM Corp . 2002 , 2008
2006-01-14 13:21:04 -08:00
* Author ( s ) : Cornelia Huck ( cornelia . huck @ de . ibm . com )
2005-04-16 15:20:36 -07:00
* Martin Schwidefsky ( schwidefsky @ de . ibm . com )
*/
# include <linux/module.h>
# include <linux/init.h>
2005-10-30 15:03:48 -08:00
# include <linux/jiffies.h>
# include <linux/string.h>
2005-04-16 15:20:36 -07:00
# include <asm/ccwdev.h>
2005-09-03 15:58:01 -07:00
# include <asm/cio.h>
2007-04-27 16:01:31 +02:00
# include <asm/chpid.h>
2005-04-16 15:20:36 -07:00
# include "cio.h"
# include "cio_debug.h"
# include "css.h"
# include "device.h"
# include "chsc.h"
# include "ioasm.h"
2007-04-27 16:01:28 +02:00
# include "chp.h"
2005-04-16 15:20:36 -07:00
2008-01-26 14:10:37 +01:00
static int timeout_log_enabled;

/* Kernel parameter "ccw_timeout_log": turn on timeout state dumping. */
static int __init ccw_timeout_log_setup(char *unused)
{
	timeout_log_enabled = 1;
	return 1;
}

__setup("ccw_timeout_log", ccw_timeout_log_setup);
/*
 * Dump diagnostic state (orb, schib, device flags) to the kernel log after
 * a ccw device timeout.  Only called when the "ccw_timeout_log" kernel
 * parameter was given; intended to aid problem determination.
 */
static void ccw_timeout_log(struct ccw_device *cdev)
{
	struct schib schib;
	struct subchannel *sch;
	struct io_subchannel_private *private;
	union orb *orb;
	int cc;

	sch = to_subchannel(cdev->dev.parent);
	private = to_io_private(sch);
	orb = &private->orb;
	/* Fetch the current subchannel-information block; cc is logged below. */
	cc = stsch(sch->schid, &schib);

	printk(KERN_WARNING "cio: ccw device timeout occurred at %llx, "
	       "device information:\n", get_tod_clock());
	printk(KERN_WARNING "cio: orb:\n");
	print_hex_dump(KERN_WARNING, "cio:  ", DUMP_PREFIX_NONE, 16, 1,
		       orb, sizeof(*orb), 0);
	printk(KERN_WARNING "cio: ccw device bus id: %s\n",
	       dev_name(&cdev->dev));
	printk(KERN_WARNING "cio: subchannel bus id: %s\n",
	       dev_name(&sch->dev));
	printk(KERN_WARNING "cio: subchannel lpm: %02x, opm: %02x, "
	       "vpm: %02x\n", sch->lpm, sch->opm, sch->vpm);
	if (orb->tm.b) {
		/* Transport mode: the orb points at a tcw. */
		printk(KERN_WARNING "cio: orb indicates transport mode\n");
		printk(KERN_WARNING "cio: last tcw:\n");
		print_hex_dump(KERN_WARNING, "cio:  ", DUMP_PREFIX_NONE, 16, 1,
			       (void *)(addr_t)orb->tm.tcw,
			       sizeof(struct tcw), 0);
	} else {
		/* Command mode: the orb points at a channel program. */
		printk(KERN_WARNING "cio: orb indicates command mode\n");
		/* Distinguish internally generated channel programs. */
		if ((void *)(addr_t)orb->cmd.cpa ==
		    &private->dma_area->sense_ccw ||
		    (void *)(addr_t)orb->cmd.cpa ==
		    cdev->private->dma_area->iccws)
			printk(KERN_WARNING "cio: last channel program "
			       "(intern):\n");
		else
			printk(KERN_WARNING "cio: last channel program:\n");
		print_hex_dump(KERN_WARNING, "cio:  ", DUMP_PREFIX_NONE, 16, 1,
			       (void *)(addr_t)orb->cmd.cpa,
			       sizeof(struct ccw1), 0);
	}
	printk(KERN_WARNING "cio: ccw device state: %d\n",
	       cdev->private->state);
	printk(KERN_WARNING "cio: store subchannel returned: cc=%d\n", cc);
	printk(KERN_WARNING "cio: schib:\n");
	print_hex_dump(KERN_WARNING, "cio:  ", DUMP_PREFIX_NONE, 16, 1,
		       &schib, sizeof(schib), 0);
	printk(KERN_WARNING "cio: ccw device flags:\n");
	print_hex_dump(KERN_WARNING, "cio:  ", DUMP_PREFIX_NONE, 16, 1,
		       &cdev->private->flags, sizeof(cdev->private->flags), 0);
}
2005-04-16 15:20:36 -07:00
/*
 * Timeout function. It just triggers a DEV_EVENT_TIMEOUT.
 *
 * Runs as a timer callback; the device lock is taken so the FSM event is
 * delivered with the same locking as interrupt-driven events.
 */
void
ccw_device_timeout(struct timer_list *t)
{
	/* Recover the owning private structure, then the device, from the timer. */
	struct ccw_device_private *priv = from_timer(priv, t, timer);
	struct ccw_device *cdev = priv->cdev;

	spin_lock_irq(cdev->ccwlock);
	/* Optional state dump, enabled via the "ccw_timeout_log" parameter. */
	if (timeout_log_enabled)
		ccw_timeout_log(cdev);
	dev_fsm_event(cdev, DEV_EVENT_TIMEOUT);
	spin_unlock_irq(cdev->ccwlock);
}
/*
* Set timeout
*/
void
ccw_device_set_timeout ( struct ccw_device * cdev , int expires )
{
if ( expires = = 0 ) {
del_timer ( & cdev - > private - > timer ) ;
return ;
}
if ( timer_pending ( & cdev - > private - > timer ) ) {
if ( mod_timer ( & cdev - > private - > timer , jiffies + expires ) )
return ;
}
cdev - > private - > timer . expires = jiffies + expires ;
add_timer ( & cdev - > private - > timer ) ;
}
int
ccw_device_cancel_halt_clear ( struct ccw_device * cdev )
{
struct subchannel * sch ;
int ret ;
sch = to_subchannel ( cdev - > dev . parent ) ;
2017-03-17 04:17:28 +01:00
ret = cio_cancel_halt_clear ( sch , & cdev - > private - > iretry ) ;
if ( ret = = - EIO )
CIO_MSG_EVENT ( 0 , " 0.%x.%04x: could not stop I/O \n " ,
cdev - > private - > dev_id . ssid ,
cdev - > private - > dev_id . devno ) ;
return ret ;
2005-04-16 15:20:36 -07:00
}
2009-06-16 10:30:20 +02:00
void ccw_device_update_sense_data ( struct ccw_device * cdev )
2005-04-16 15:20:36 -07:00
{
2009-06-16 10:30:20 +02:00
memset ( & cdev - > id , 0 , sizeof ( cdev - > id ) ) ;
2019-03-26 12:41:09 +01:00
cdev - > id . cu_type = cdev - > private - > dma_area - > senseid . cu_type ;
cdev - > id . cu_model = cdev - > private - > dma_area - > senseid . cu_model ;
cdev - > id . dev_type = cdev - > private - > dma_area - > senseid . dev_type ;
cdev - > id . dev_model = cdev - > private - > dma_area - > senseid . dev_model ;
2009-06-16 10:30:20 +02:00
}
2005-04-16 15:20:36 -07:00
2009-06-16 10:30:20 +02:00
int ccw_device_test_sense_data ( struct ccw_device * cdev )
{
2019-03-26 12:41:09 +01:00
return cdev - > id . cu_type = =
cdev - > private - > dma_area - > senseid . cu_type & &
cdev - > id . cu_model = =
cdev - > private - > dma_area - > senseid . cu_model & &
cdev - > id . dev_type = =
cdev - > private - > dma_area - > senseid . dev_type & &
cdev - > id . dev_model = =
cdev - > private - > dma_area - > senseid . dev_model ;
2005-04-16 15:20:36 -07:00
}
/*
* The machine won ' t give us any notification by machine check if a chpid has
* been varied online on the SE so we have to find out by magic ( i . e . driving
* the channel subsystem to device selection and updating our path masks ) .
*/
2007-02-05 21:18:53 +01:00
static void
2005-04-16 15:20:36 -07:00
__recover_lost_chpids ( struct subchannel * sch , int old_lpm )
{
int mask , i ;
2007-04-27 16:01:26 +02:00
struct chp_id chpid ;
2005-04-16 15:20:36 -07:00
2007-04-27 16:01:26 +02:00
chp_id_init ( & chpid ) ;
2005-04-16 15:20:36 -07:00
for ( i = 0 ; i < 8 ; i + + ) {
mask = 0x80 > > i ;
if ( ! ( sch - > lpm & mask ) )
continue ;
if ( old_lpm & mask )
continue ;
2007-04-27 16:01:26 +02:00
chpid . id = sch - > schib . pmcw . chpid [ i ] ;
2007-04-27 16:01:34 +02:00
if ( ! chp_is_registered ( chpid ) )
css_schedule_eval_all ( ) ;
2005-04-16 15:20:36 -07:00
}
}
/*
 * Stop device recognition.
 *
 * Called with the result of the sense-id sequence; disables the subchannel,
 * refreshes path information and moves the device FSM into the state that
 * matches the recognition outcome.
 */
static void
ccw_device_recog_done(struct ccw_device *cdev, int state)
{
	struct subchannel *sch;
	int old_lpm;

	sch = to_subchannel(cdev->dev.parent);

	if (cio_disable_subchannel(sch))
		state = DEV_STATE_NOT_OPER;
	/*
	 * Now that we tried recognition, we have performed device selection
	 * through ssch() and the path information is up to date.
	 */
	old_lpm = sch->lpm;

	/* Check since device may again have become not operational. */
	if (cio_update_schib(sch))
		state = DEV_STATE_NOT_OPER;
	else
		sch->lpm = sch->schib.pmcw.pam & sch->opm;

	if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID)
		/* Force reprobe on all chpids. */
		old_lpm = 0;
	if (sch->lpm != old_lpm)
		__recover_lost_chpids(sch, old_lpm);
	/* A disconnected device that failed recognition stays disconnected. */
	if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID &&
	    (state == DEV_STATE_NOT_OPER || state == DEV_STATE_BOXED)) {
		cdev->private->flags.recog_done = 1;
		cdev->private->state = DEV_STATE_DISCONNECTED;
		wake_up(&cdev->private->wait_q);
		return;
	}
	/* During resume, just record the result and wake up the waiter. */
	if (cdev->private->flags.resuming) {
		cdev->private->state = state;
		cdev->private->flags.recog_done = 1;
		wake_up(&cdev->private->wait_q);
		return;
	}
	switch (state) {
	case DEV_STATE_NOT_OPER:
		break;
	case DEV_STATE_OFFLINE:
		if (!cdev->online) {
			ccw_device_update_sense_data(cdev);
			break;
		}
		cdev->private->state = DEV_STATE_OFFLINE;
		cdev->private->flags.recog_done = 1;
		if (ccw_device_test_sense_data(cdev)) {
			/* Same device as before: notify driver and go online. */
			cdev->private->flags.donotify = 1;
			ccw_device_online(cdev);
			wake_up(&cdev->private->wait_q);
		} else {
			/* Identity changed: rebind to a matching driver. */
			ccw_device_update_sense_data(cdev);
			ccw_device_sched_todo(cdev, CDEV_TODO_REBIND);
		}
		return;
	case DEV_STATE_BOXED:
		if (cdev->id.cu_type != 0) { /* device was recognized before */
			cdev->private->flags.recog_done = 1;
			cdev->private->state = DEV_STATE_BOXED;
			wake_up(&cdev->private->wait_q);
			return;
		}
		break;
	}
	cdev->private->state = state;
	io_subchannel_recog_done(cdev);
	wake_up(&cdev->private->wait_q);
}
/*
 * Function called from device_id.c after sense id has completed.
 * Map the sense-id result onto the recognition outcome state.
 */
void
ccw_device_sense_id_done(struct ccw_device *cdev, int err)
{
	int state;

	if (err == 0)
		state = DEV_STATE_OFFLINE;
	else if (err == -ETIME)	/* Sense id stopped by timeout. */
		state = DEV_STATE_BOXED;
	else
		state = DEV_STATE_NOT_OPER;
	ccw_device_recog_done(cdev, state);
}
2010-02-26 22:37:28 +01:00
/**
* ccw_device_notify ( ) - inform the device ' s driver about an event
2011-03-30 22:57:33 -03:00
* @ cdev : device for which an event occurred
2010-02-26 22:37:28 +01:00
* @ event : event that occurred
*
* Returns :
* - % EINVAL if the device is offline or has no driver .
* - % EOPNOTSUPP if the device ' s driver has no notifier registered .
* % NOTIFY_OK if the driver wants to keep the device .
* % NOTIFY_BAD if the driver doesn ' t want to keep the device .
*/
2008-07-14 09:58:45 +02:00
int ccw_device_notify ( struct ccw_device * cdev , int event )
{
2010-02-26 22:37:28 +01:00
int ret = - EINVAL ;
2008-07-14 09:58:45 +02:00
if ( ! cdev - > drv )
2010-02-26 22:37:28 +01:00
goto out ;
2008-07-14 09:58:45 +02:00
if ( ! cdev - > online )
2010-02-26 22:37:28 +01:00
goto out ;
2008-08-21 19:46:39 +02:00
CIO_MSG_EVENT ( 2 , " notify called for 0.%x.%04x, event=%d \n " ,
cdev - > private - > dev_id . ssid , cdev - > private - > dev_id . devno ,
event ) ;
2010-02-26 22:37:28 +01:00
if ( ! cdev - > drv - > notify ) {
ret = - EOPNOTSUPP ;
goto out ;
}
if ( cdev - > drv - > notify ( cdev , event ) )
ret = NOTIFY_OK ;
else
ret = NOTIFY_BAD ;
out :
return ret ;
2008-07-14 09:58:45 +02:00
}
2008-08-21 19:46:39 +02:00
static void ccw_device_oper_notify ( struct ccw_device * cdev )
{
2010-10-25 16:10:34 +02:00
struct subchannel * sch = to_subchannel ( cdev - > dev . parent ) ;
2010-02-26 22:37:28 +01:00
if ( ccw_device_notify ( cdev , CIO_OPER ) = = NOTIFY_OK ) {
2006-06-29 15:08:41 +02:00
/* Reenable channel measurements, if needed. */
2009-12-07 12:51:19 +01:00
ccw_device_sched_todo ( cdev , CDEV_TODO_ENABLE_CMF ) ;
2010-10-25 16:10:34 +02:00
/* Save indication for new paths. */
cdev - > private - > path_new_mask = sch - > vpm ;
2008-08-21 19:46:39 +02:00
return ;
}
/* Driver doesn't want device back. */
ccw_device_set_notoper ( cdev ) ;
2009-12-07 12:51:19 +01:00
ccw_device_sched_todo ( cdev , CDEV_TODO_REBIND ) ;
2005-04-16 15:20:36 -07:00
}
/*
 * Finished with online/offline processing.
 *
 * Commit the final FSM state, notify the driver about gone/boxed/no-path
 * conditions and wake up anyone waiting for the transition to finish.
 */
static void
ccw_device_done(struct ccw_device *cdev, int state)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);

	ccw_device_set_timeout(cdev, 0);

	if (state != DEV_STATE_ONLINE)
		cio_disable_subchannel(sch);

	/* Reset device status. */
	memset(&cdev->private->dma_area->irb, 0, sizeof(struct irb));

	cdev->private->state = state;

	switch (state) {
	case DEV_STATE_BOXED:
		CIO_MSG_EVENT(0, "Boxed device %04x on subchannel %04x\n",
			      cdev->private->dev_id.devno, sch->schid.sch_no);
		/* Unregister the device unless the driver wants to keep it. */
		if (cdev->online &&
		    ccw_device_notify(cdev, CIO_BOXED) != NOTIFY_OK)
			ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
		cdev->private->flags.donotify = 0;
		break;
	case DEV_STATE_NOT_OPER:
		CIO_MSG_EVENT(0, "Device %04x gone on subchannel %04x\n",
			      cdev->private->dev_id.devno, sch->schid.sch_no);
		if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
			ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
		else
			ccw_device_set_disconnected(cdev);
		cdev->private->flags.donotify = 0;
		break;
	case DEV_STATE_DISCONNECTED:
		CIO_MSG_EVENT(0, "Disconnected device %04x on subchannel "
			      "%04x\n", cdev->private->dev_id.devno,
			      sch->schid.sch_no);
		if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK) {
			cdev->private->state = DEV_STATE_NOT_OPER;
			ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
		} else
			ccw_device_set_disconnected(cdev);
		cdev->private->flags.donotify = 0;
		break;
	default:
		break;
	}

	/* Deliver a deferred "operational again" notification, if requested. */
	if (cdev->private->flags.donotify) {
		cdev->private->flags.donotify = 0;
		ccw_device_oper_notify(cdev);
	}
	wake_up(&cdev->private->wait_q);
}
/*
 * Start device recognition.
 */
void ccw_device_recognition(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	/*
	 * We used to start here with a sense pgid to find out whether a device
	 * is locked by someone else. Unfortunately, the sense pgid command
	 * code has other meanings on devices predating the path grouping
	 * algorithm, so we start with sense id and box the device after an
	 * timeout (or if sense pgid during path verification detects the device
	 * is locked, as may happen on newer devices).
	 */
	cdev->private->flags.recog_done = 0;
	cdev->private->state = DEV_STATE_SENSE_ID;
	/* Enable the subchannel, using its own address as intparm. */
	if (cio_enable_subchannel(sch, (u32)(addr_t)sch)) {
		ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
		return;
	}
	ccw_device_sense_id_start(cdev);
}
/*
2009-12-07 12:51:26 +01:00
* Handle events for states that use the ccw request infrastructure .
2005-04-16 15:20:36 -07:00
*/
2009-12-07 12:51:26 +01:00
static void ccw_device_request_event ( struct ccw_device * cdev , enum dev_event e )
2005-04-16 15:20:36 -07:00
{
2009-12-07 12:51:26 +01:00
switch ( e ) {
case DEV_EVENT_NOTOPER :
ccw_request_notoper ( cdev ) ;
2005-04-16 15:20:36 -07:00
break ;
2009-12-07 12:51:26 +01:00
case DEV_EVENT_INTERRUPT :
ccw_request_handler ( cdev ) ;
break ;
case DEV_EVENT_TIMEOUT :
ccw_request_timeout ( cdev ) ;
2005-04-16 15:20:36 -07:00
break ;
default :
2009-12-07 12:51:26 +01:00
break ;
2005-04-16 15:20:36 -07:00
}
}
2010-10-25 16:10:34 +02:00
static void ccw_device_report_path_events ( struct ccw_device * cdev )
{
struct subchannel * sch = to_subchannel ( cdev - > dev . parent ) ;
int path_event [ 8 ] ;
int chp , mask ;
for ( chp = 0 , mask = 0x80 ; chp < 8 ; chp + + , mask > > = 1 ) {
path_event [ chp ] = PE_NONE ;
if ( mask & cdev - > private - > path_gone_mask & ~ ( sch - > vpm ) )
path_event [ chp ] | = PE_PATH_GONE ;
if ( mask & cdev - > private - > path_new_mask & sch - > vpm )
path_event [ chp ] | = PE_PATH_AVAILABLE ;
if ( mask & cdev - > private - > pgid_reset_mask & sch - > vpm )
path_event [ chp ] | = PE_PATHGROUP_ESTABLISHED ;
}
if ( cdev - > online & & cdev - > drv - > path_event )
cdev - > drv - > path_event ( cdev , path_event ) ;
}
static void ccw_device_reset_path_events ( struct ccw_device * cdev )
{
cdev - > private - > path_gone_mask = 0 ;
cdev - > private - > path_new_mask = 0 ;
cdev - > private - > pgid_reset_mask = 0 ;
}
2011-12-01 13:32:21 +01:00
static void create_fake_irb ( struct irb * irb , int type )
{
memset ( irb , 0 , sizeof ( * irb ) ) ;
if ( type = = FAKE_CMD_IRB ) {
struct cmd_scsw * scsw = & irb - > scsw . cmd ;
scsw - > cc = 1 ;
scsw - > fctl = SCSW_FCTL_START_FUNC ;
scsw - > actl = SCSW_ACTL_START_PEND ;
scsw - > stctl = SCSW_STCTL_STATUS_PEND ;
} else if ( type = = FAKE_TM_IRB ) {
struct tm_scsw * scsw = & irb - > scsw . tm ;
scsw - > x = 1 ;
scsw - > cc = 1 ;
scsw - > fctl = SCSW_FCTL_START_FUNC ;
scsw - > actl = SCSW_ACTL_START_PEND ;
scsw - > stctl = SCSW_STCTL_STATUS_PEND ;
}
}
2017-09-14 13:55:22 +02:00
static void ccw_device_handle_broken_paths ( struct ccw_device * cdev )
{
struct subchannel * sch = to_subchannel ( cdev - > dev . parent ) ;
u8 broken_paths = ( sch - > schib . pmcw . pam & sch - > opm ) ^ sch - > vpm ;
if ( broken_paths & & ( cdev - > private - > path_broken_mask ! = broken_paths ) )
ccw_device_schedule_recovery ( ) ;
cdev - > private - > path_broken_mask = broken_paths ;
}
2011-12-01 13:32:21 +01:00
/*
 * Called when path verification has finished; commit the result to the
 * device FSM, deliver a deferred fake irb if one was requested, and
 * report accumulated path events.
 */
void ccw_device_verify_done(struct ccw_device *cdev, int err)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	/* Update schib - pom may have changed. */
	if (cio_update_schib(sch)) {
		err = -ENODEV;
		goto callback;
	}
	/* Update lpm with verified path mask. */
	sch->lpm = sch->vpm;
	/* Repeat path verification? */
	if (cdev->private->flags.doverify) {
		ccw_device_verify_start(cdev);
		return;
	}
callback:
	switch (err) {
	case 0:
		ccw_device_done(cdev, DEV_STATE_ONLINE);
		/* Deliver fake irb to device driver, if needed. */
		if (cdev->private->flags.fake_irb) {
			create_fake_irb(&cdev->private->dma_area->irb,
					cdev->private->flags.fake_irb);
			cdev->private->flags.fake_irb = 0;
			if (cdev->handler)
				cdev->handler(cdev, cdev->private->intparm,
					      &cdev->private->dma_area->irb);
			memset(&cdev->private->dma_area->irb, 0,
			       sizeof(struct irb));
		}
		ccw_device_report_path_events(cdev);
		ccw_device_handle_broken_paths(cdev);
		break;
	case -ETIME:
	case -EUSERS:
		/* Reset oper notify indication after verify error. */
		cdev->private->flags.donotify = 0;
		ccw_device_done(cdev, DEV_STATE_BOXED);
		break;
	case -EACCES:
		/* Reset oper notify indication after verify error. */
		cdev->private->flags.donotify = 0;
		ccw_device_done(cdev, DEV_STATE_DISCONNECTED);
		break;
	default:
		/* Reset oper notify indication after verify error. */
		cdev->private->flags.donotify = 0;
		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
		break;
	}
	ccw_device_reset_path_events(cdev);
}
/*
* Get device online .
*/
int
ccw_device_online ( struct ccw_device * cdev )
{
struct subchannel * sch ;
int ret ;
if ( ( cdev - > private - > state ! = DEV_STATE_OFFLINE ) & &
( cdev - > private - > state ! = DEV_STATE_BOXED ) )
return - EINVAL ;
sch = to_subchannel ( cdev - > dev . parent ) ;
2008-04-30 13:38:39 +02:00
ret = cio_enable_subchannel ( sch , ( u32 ) ( addr_t ) sch ) ;
2005-04-16 15:20:36 -07:00
if ( ret ! = 0 ) {
/* Couldn't enable the subchannel for i/o. Sick device. */
if ( ret = = - ENODEV )
dev_fsm_event ( cdev , DEV_EVENT_NOTOPER ) ;
return ret ;
}
2009-12-07 12:51:31 +01:00
/* Start initial path verification. */
cdev - > private - > state = DEV_STATE_VERIFY ;
ccw_device_verify_start ( cdev ) ;
2005-04-16 15:20:36 -07:00
return 0 ;
}
void
ccw_device_disband_done ( struct ccw_device * cdev , int err )
{
switch ( err ) {
case 0 :
ccw_device_done ( cdev , DEV_STATE_OFFLINE ) ;
break ;
case - ETIME :
ccw_device_done ( cdev , DEV_STATE_BOXED ) ;
break ;
default :
2007-05-31 17:38:07 +02:00
cdev - > private - > flags . donotify = 0 ;
2005-04-16 15:20:36 -07:00
ccw_device_done ( cdev , DEV_STATE_NOT_OPER ) ;
break ;
}
}
/*
 * Shutdown device.
 *
 * Returns 0 when the offline transition was started or completed,
 * -ENODEV/-EBUSY/-EINVAL when it cannot be done right now.
 */
int
ccw_device_offline(struct ccw_device *cdev)
{
	struct subchannel *sch;

	/* Allow ccw_device_offline while disconnected. */
	if (cdev->private->state == DEV_STATE_DISCONNECTED ||
	    cdev->private->state == DEV_STATE_NOT_OPER) {
		cdev->private->flags.donotify = 0;
		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
		return 0;
	}
	if (cdev->private->state == DEV_STATE_BOXED) {
		ccw_device_done(cdev, DEV_STATE_BOXED);
		return 0;
	}
	if (ccw_device_is_orphan(cdev)) {
		ccw_device_done(cdev, DEV_STATE_OFFLINE);
		return 0;
	}
	sch = to_subchannel(cdev->dev.parent);
	if (cio_update_schib(sch))
		return -ENODEV;
	/* Subchannel still has I/O activity pending? */
	if (scsw_actl(&sch->schib.scsw) != 0)
		return -EBUSY;
	if (cdev->private->state != DEV_STATE_ONLINE)
		return -EINVAL;
	/* Are we doing path grouping? */
	if (!cdev->private->flags.pgroup) {
		/* No, set state offline immediately. */
		ccw_device_done(cdev, DEV_STATE_OFFLINE);
		return 0;
	}
	/* Start Set Path Group commands. */
	cdev->private->state = DEV_STATE_DISBAND_PGID;
	ccw_device_disband_start(cdev);
	return 0;
}
/*
2007-10-12 16:11:26 +02:00
* Handle not operational event in non - special state .
2005-04-16 15:20:36 -07:00
*/
2007-10-12 16:11:26 +02:00
static void ccw_device_generic_notoper ( struct ccw_device * cdev ,
enum dev_event dev_event )
2005-04-16 15:20:36 -07:00
{
2010-02-26 22:37:28 +01:00
if ( ccw_device_notify ( cdev , CIO_GONE ) ! = NOTIFY_OK )
2009-12-07 12:51:19 +01:00
ccw_device_sched_todo ( cdev , CDEV_TODO_UNREG ) ;
2009-10-06 10:34:02 +02:00
else
ccw_device_set_disconnected ( cdev ) ;
2005-04-16 15:20:36 -07:00
}
2009-09-11 10:28:14 +02:00
/*
* Handle path verification event in offline state .
*/
static void ccw_device_offline_verify ( struct ccw_device * cdev ,
enum dev_event dev_event )
{
struct subchannel * sch = to_subchannel ( cdev - > dev . parent ) ;
css_schedule_eval ( sch - > schid ) ;
}
2005-04-16 15:20:36 -07:00
/*
 * Handle path verification event.
 *
 * Starts verification only when the device is idle; otherwise sets the
 * doverify flag so verification is retried once the device quiesces.
 */
static void
ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct subchannel *sch;

	if (cdev->private->state == DEV_STATE_W4SENSE) {
		/* Basic sense in progress; verify after it completes. */
		cdev->private->flags.doverify = 1;
		return;
	}
	sch = to_subchannel(cdev->dev.parent);
	/*
	 * Since we might not just be coming from an interrupt from the
	 * subchannel we have to update the schib.
	 */
	if (cio_update_schib(sch)) {
		ccw_device_verify_done(cdev, -ENODEV);
		return;
	}

	if (scsw_actl(&sch->schib.scsw) != 0 ||
	    (scsw_stctl(&sch->schib.scsw) & SCSW_STCTL_STATUS_PEND) ||
	    (scsw_stctl(&cdev->private->dma_area->irb.scsw) &
	     SCSW_STCTL_STATUS_PEND)) {
		/*
		 * No final status yet or final status not yet delivered
		 * to the device driver. Can't do path verification now,
		 * delay until final status was delivered.
		 */
		cdev->private->flags.doverify = 1;
		return;
	}
	/* Device is idle, we can do the path verification. */
	cdev->private->state = DEV_STATE_VERIFY;
	ccw_device_verify_start(cdev);
}
2009-12-07 12:51:32 +01:00
/*
* Handle path verification event in boxed state .
*/
static void ccw_device_boxed_verify ( struct ccw_device * cdev ,
enum dev_event dev_event )
{
struct subchannel * sch = to_subchannel ( cdev - > dev . parent ) ;
if ( cdev - > online ) {
if ( cio_enable_subchannel ( sch , ( u32 ) ( addr_t ) sch ) )
ccw_device_done ( cdev , DEV_STATE_NOT_OPER ) ;
else
ccw_device_online_verify ( cdev , dev_event ) ;
} else
css_schedule_eval ( sch - > schid ) ;
}
2015-10-26 12:35:06 +01:00
/*
 * Pass interrupt to device driver.
 *
 * Returns 1 when the accumulated irb was delivered (and cleared),
 * 0 when the driver did not need to see this interrupt yet.
 */
static int ccw_device_call_handler(struct ccw_device *cdev)
{
	unsigned int stctl;
	int ending_status;

	/*
	 * we allow for the device action handler if .
	 *  - we received ending status
	 *  - the action handler requested to see all interrupts
	 *  - we received an intermediate status
	 *  - fast notification was requested (primary status)
	 *  - unsolicited interrupts
	 */
	stctl = scsw_stctl(&cdev->private->dma_area->irb.scsw);
	ending_status = (stctl & SCSW_STCTL_SEC_STATUS) ||
		(stctl == (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)) ||
		(stctl == SCSW_STCTL_STATUS_PEND);
	if (!ending_status &&
	    !cdev->private->options.repall &&
	    !(stctl & SCSW_STCTL_INTER_STATUS) &&
	    !(cdev->private->options.fast &&
	      (stctl & SCSW_STCTL_PRIM_STATUS)))
		return 0;
	/* The request is finished; stop the associated timeout. */
	if (ending_status)
		ccw_device_set_timeout(cdev, 0);

	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      &cdev->private->dma_area->irb);

	/* Clear the accumulated irb for the next request. */
	memset(&cdev->private->dma_area->irb, 0, sizeof(struct irb));
	return 1;
}
2005-04-16 15:20:36 -07:00
/*
 * Got an interrupt for a normal io (state online).
 */
static void
ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct irb *irb;
	int is_cmd;

	irb = this_cpu_ptr(&cio_irb);
	/* Transport-mode requests never get a basic sense started here. */
	is_cmd = !scsw_is_tm(&irb->scsw);
	/* Check for unsolicited interrupt. */
	if (!scsw_is_solicited(&irb->scsw)) {
		if (is_cmd && (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) &&
		    !irb->esw.esw0.erw.cons) {
			/* Unit check but no sense data. Need basic sense. */
			if (ccw_device_do_sense(cdev, irb) != 0)
				goto call_handler_unsol;
			/* Keep the original irb; deliver it after sensing. */
			memcpy(&cdev->private->dma_area->irb, irb,
			       sizeof(struct irb));
			cdev->private->state = DEV_STATE_W4SENSE;
			cdev->private->intparm = 0;
			return;
		}
call_handler_unsol:
		if (cdev->handler)
			cdev->handler(cdev, 0, irb);
		if (cdev->private->flags.doverify)
			ccw_device_online_verify(cdev, 0);
		return;
	}
	/* Accumulate status and find out if a basic sense is needed. */
	ccw_device_accumulate_irb(cdev, irb);
	if (is_cmd && cdev->private->flags.dosense) {
		if (ccw_device_do_sense(cdev, irb) == 0) {
			cdev->private->state = DEV_STATE_W4SENSE;
		}
		return;
	}
	/* Call the handler. */
	if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
		/* Start delayed path verification. */
		ccw_device_online_verify(cdev, 0);
}
/*
 * Got an timeout in online state.
 *
 * Try to terminate the running I/O; if that is still busy, retry from
 * the TIMEOUT_KILL state after 3 seconds.
 */
static void
ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
	int ret;

	ccw_device_set_timeout(cdev, 0);
	cdev->private->iretry = 255;
	/* Remember why the I/O is being killed for the final handler call. */
	cdev->private->async_kill_io_rc = -ETIMEDOUT;
	ret = ccw_device_cancel_halt_clear(cdev);
	if (ret == -EBUSY) {
		ccw_device_set_timeout(cdev, 3 * HZ);
		cdev->private->state = DEV_STATE_TIMEOUT_KILL;
		return;
	}
	if (ret)
		dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
	else if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(-ETIMEDOUT));
}
/*
 * Got an interrupt for a basic sense.
 */
static void
ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct irb *irb;

	irb = this_cpu_ptr(&cio_irb);
	/* Check for unsolicited interrupt. */
	if (scsw_stctl(&irb->scsw) ==
	    (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
		if (scsw_cc(&irb->scsw) == 1)
			/* Basic sense hasn't started. Try again. */
			ccw_device_do_sense(cdev, irb);
		else {
			CIO_MSG_EVENT(0, "0.%x.%04x: unsolicited "
				      "interrupt during w4sense...\n",
				      cdev->private->dev_id.ssid,
				      cdev->private->dev_id.devno);
			if (cdev->handler)
				cdev->handler(cdev, 0, irb);
		}
		return;
	}
	/*
	 * Check if a halt or clear has been issued in the meanwhile. If yes,
	 * only deliver the halt/clear interrupt to the device driver as if it
	 * had killed the original request.
	 */
	if (scsw_fctl(&irb->scsw) &
	    (SCSW_FCTL_CLEAR_FUNC | SCSW_FCTL_HALT_FUNC)) {
		cdev->private->flags.dosense = 0;
		/* Drop the saved irb; report only the halt/clear result. */
		memset(&cdev->private->dma_area->irb, 0, sizeof(struct irb));
		ccw_device_accumulate_irb(cdev, irb);
		goto call_handler;
	}
	/* Add basic sense info to irb. */
	ccw_device_accumulate_basic_sense(cdev, irb);
	if (cdev->private->flags.dosense) {
		/* Another basic sense is needed. */
		ccw_device_do_sense(cdev, irb);
		return;
	}
call_handler:
	cdev->private->state = DEV_STATE_ONLINE;
	/* In case sensing interfered with setting the device online */
	wake_up(&cdev->private->wait_q);
	/* Call the handler. */
	if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
		/* Start delayed path verification. */
		ccw_device_online_verify(cdev, 0);
}
/*
 * Interrupt in state DEV_STATE_TIMEOUT_KILL: the cancel/halt/clear
 * issued by ccw_device_kill_io() has taken effect.  Stop the kill
 * timer, schedule path verification and report the rc saved in
 * async_kill_io_rc to the device driver in place of an irb.
 */
static void
ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
	ccw_device_set_timeout(cdev, 0);
	/* Start delayed path verification. */
	ccw_device_online_verify(cdev, 0);
	/* OK, i/o is dead now. Call interrupt handler. */
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(cdev->private->async_kill_io_rc));
}
/*
 * Timeout in state DEV_STATE_TIMEOUT_KILL: the previous
 * cancel/halt/clear has not completed yet.  Retry it; while the
 * subchannel stays busy, keep re-arming a 3 second timer.  Once it
 * goes through, schedule path verification and report the rc saved
 * in async_kill_io_rc to the device driver.
 */
static void
ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
	int ret;

	ret = ccw_device_cancel_halt_clear(cdev);
	if (ret == -EBUSY) {
		/* Still busy - try again in 3 seconds. */
		ccw_device_set_timeout(cdev, 3 * HZ);
		return;
	}
	/* Start delayed path verification. */
	ccw_device_online_verify(cdev, 0);
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(cdev->private->async_kill_io_rc));
}
2008-07-14 09:58:45 +02:00
/*
 * Forcibly terminate the I/O currently in progress on @cdev.
 *
 * Issues cancel/halt/clear.  If the subchannel remains busy (-EBUSY),
 * arm a 3 second timer and move to DEV_STATE_TIMEOUT_KILL; the
 * termination is then finished by ccw_device_killing_irq() /
 * ccw_device_killing_timeout(), which deliver async_kill_io_rc (-EIO)
 * to the driver.  Otherwise report -EIO to the driver right away and
 * schedule path verification.
 */
void ccw_device_kill_io(struct ccw_device *cdev)
{
	int ret;

	/* Stop any timer that may still be running. */
	ccw_device_set_timeout(cdev, 0);
	cdev->private->iretry = 255;
	/* rc delivered by the deferred killing_irq/killing_timeout paths */
	cdev->private->async_kill_io_rc = -EIO;
	ret = ccw_device_cancel_halt_clear(cdev);
	if (ret == -EBUSY) {
		ccw_device_set_timeout(cdev, 3 * HZ);
		cdev->private->state = DEV_STATE_TIMEOUT_KILL;
		return;
	}
	/* Start delayed path verification. */
	ccw_device_online_verify(cdev, 0);
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(-EIO));
}
/*
 * A verify event arrived while another operation is still in progress:
 * just record it, so that path verification is started once the
 * current task has finished.
 */
static void
ccw_device_delay_verify(struct ccw_device *cdev, enum dev_event dev_event)
{
	/* Start verification after current task finished. */
	cdev->private->flags.doverify = 1;
}
static void
ccw_device_start_id ( struct ccw_device * cdev , enum dev_event dev_event )
{
struct subchannel * sch ;
sch = to_subchannel ( cdev - > dev . parent ) ;
2008-04-30 13:38:39 +02:00
if ( cio_enable_subchannel ( sch , ( u32 ) ( addr_t ) sch ) ! = 0 )
2005-04-16 15:20:36 -07:00
/* Couldn't enable the subchannel for i/o. Sick device. */
return ;
cdev - > private - > state = DEV_STATE_DISCONNECTED_SENSE_ID ;
ccw_device_sense_id_start ( cdev ) ;
}
2008-07-14 09:58:45 +02:00
/*
 * Re-probe a device in state DEV_STATE_DISCONNECTED to find out
 * whether it became operational again.  Refreshes the subchannel
 * data, resets the path configuration and restarts sense-ID, or
 * schedules a subchannel evaluation if a different device now
 * appears on the same subchannel.
 */
void ccw_device_trigger_reprobe(struct ccw_device *cdev)
{
	struct subchannel *sch;

	if (cdev->private->state != DEV_STATE_DISCONNECTED)
		return;

	sch = to_subchannel(cdev->dev.parent);
	/* Update some values. */
	if (cio_update_schib(sch))
		return;
	/*
	 * The pim, pam, pom values may not be accurate, but they are the best
	 * we have before performing device selection :/
	 */
	sch->lpm = sch->schib.pmcw.pam & sch->opm;
	/*
	 * Use the initial configuration since we can't be sure that the old
	 * paths are valid.
	 */
	io_subchannel_init_config(sch);
	if (cio_commit_config(sch))
		return;

	/* We should also update ssd info, but this has to wait. */
	/* Check if this is another device which appeared on the same sch. */
	if (sch->schib.pmcw.dev != cdev->private->dev_id.devno)
		css_schedule_eval(sch->schid);
	else
		ccw_device_start_id(cdev, 0);
}
2009-12-07 12:51:24 +01:00
static void ccw_device_disabled_irq ( struct ccw_device * cdev ,
enum dev_event dev_event )
2005-04-16 15:20:36 -07:00
{
struct subchannel * sch ;
sch = to_subchannel ( cdev - > dev . parent ) ;
/*
2009-12-07 12:51:24 +01:00
* An interrupt in a disabled state means a previous disable was not
2009-03-26 15:24:06 +01:00
* successful - should not happen , but we try to disable again .
2005-04-16 15:20:36 -07:00
*/
cio_disable_subchannel ( sch ) ;
}
/*
 * In state DEV_STATE_CMFCHANGE: retry the pending set_schib operation,
 * return the device to DEV_STATE_ONLINE and re-dispatch the event that
 * arrived while the change was outstanding.
 */
static void
ccw_device_change_cmfstate(struct ccw_device *cdev, enum dev_event dev_event)
{
	retry_set_schib(cdev);
	cdev->private->state = DEV_STATE_ONLINE;
	dev_fsm_event(cdev, dev_event);
}
2006-06-29 15:08:41 +02:00
/*
 * In state DEV_STATE_CMFUPDATE: retry copying the measurement block
 * (cmf_retry_copy_block), return the device to DEV_STATE_ONLINE and
 * re-dispatch the event that arrived while the update was outstanding.
 */
static void ccw_device_update_cmfblock(struct ccw_device *cdev,
				       enum dev_event dev_event)
{
	cmf_retry_copy_block(cdev);
	cdev->private->state = DEV_STATE_ONLINE;
	dev_fsm_event(cdev, dev_event);
}
2005-04-16 15:20:36 -07:00
/*
 * Quiescing has finished (interrupt or notoper event in state
 * DEV_STATE_QUIESCE): stop the timer, mark the device not operational
 * and wake up anyone waiting for the quiesce to complete.
 */
static void
ccw_device_quiesce_done(struct ccw_device *cdev, enum dev_event dev_event)
{
	ccw_device_set_timeout(cdev, 0);
	cdev->private->state = DEV_STATE_NOT_OPER;
	wake_up(&cdev->private->wait_q);
}
static void
ccw_device_quiesce_timeout ( struct ccw_device * cdev , enum dev_event dev_event )
{
int ret ;
ret = ccw_device_cancel_halt_clear ( cdev ) ;
2009-12-07 12:51:35 +01:00
if ( ret = = - EBUSY ) {
ccw_device_set_timeout ( cdev , HZ / 10 ) ;
} else {
2005-04-16 15:20:36 -07:00
cdev - > private - > state = DEV_STATE_NOT_OPER ;
wake_up ( & cdev - > private - > wait_q ) ;
}
}
/*
 * No operation action. This is used e.g. to ignore a timeout event in
 * state offline.
 */
static void
ccw_device_nop(struct ccw_device *cdev, enum dev_event dev_event)
{
}
/*
 * device statemachine
 *
 * Jump table indexed by the device's current state and the incoming
 * event; each entry is the action to run for that combination.
 * ccw_device_nop entries ignore the event in that state.
 */
fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
	[DEV_STATE_NOT_OPER] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_nop,
		[DEV_EVENT_INTERRUPT]	= ccw_device_disabled_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_SENSE_ID] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_request_event,
		[DEV_EVENT_INTERRUPT]	= ccw_device_request_event,
		[DEV_EVENT_TIMEOUT]	= ccw_device_request_event,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_OFFLINE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_disabled_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
		[DEV_EVENT_VERIFY]	= ccw_device_offline_verify,
	},
	[DEV_STATE_VERIFY] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_request_event,
		[DEV_EVENT_INTERRUPT]	= ccw_device_request_event,
		[DEV_EVENT_TIMEOUT]	= ccw_device_request_event,
		[DEV_EVENT_VERIFY]	= ccw_device_delay_verify,
	},
	[DEV_STATE_ONLINE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_online_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_online_verify,
	},
	[DEV_STATE_W4SENSE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_w4sense,
		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
		[DEV_EVENT_VERIFY]	= ccw_device_online_verify,
	},
	[DEV_STATE_DISBAND_PGID] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_request_event,
		[DEV_EVENT_INTERRUPT]	= ccw_device_request_event,
		[DEV_EVENT_TIMEOUT]	= ccw_device_request_event,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_BOXED] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_nop,
		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
		[DEV_EVENT_VERIFY]	= ccw_device_boxed_verify,
	},
	/* states to wait for i/o completion before doing something */
	[DEV_STATE_TIMEOUT_KILL] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_killing_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_killing_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_nop, //FIXME
	},
	[DEV_STATE_QUIESCE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_quiesce_done,
		[DEV_EVENT_INTERRUPT]	= ccw_device_quiesce_done,
		[DEV_EVENT_TIMEOUT]	= ccw_device_quiesce_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	/* special states for devices gone not operational */
	[DEV_STATE_DISCONNECTED] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_nop,
		[DEV_EVENT_INTERRUPT]	= ccw_device_start_id,
		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
		[DEV_EVENT_VERIFY]	= ccw_device_start_id,
	},
	[DEV_STATE_DISCONNECTED_SENSE_ID] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_request_event,
		[DEV_EVENT_INTERRUPT]	= ccw_device_request_event,
		[DEV_EVENT_TIMEOUT]	= ccw_device_request_event,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_CMFCHANGE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_change_cmfstate,
		[DEV_EVENT_INTERRUPT]	= ccw_device_change_cmfstate,
		[DEV_EVENT_TIMEOUT]	= ccw_device_change_cmfstate,
		[DEV_EVENT_VERIFY]	= ccw_device_change_cmfstate,
	},
	[DEV_STATE_CMFUPDATE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_update_cmfblock,
		[DEV_EVENT_INTERRUPT]	= ccw_device_update_cmfblock,
		[DEV_EVENT_TIMEOUT]	= ccw_device_update_cmfblock,
		[DEV_EVENT_VERIFY]	= ccw_device_update_cmfblock,
	},
	[DEV_STATE_STEAL_LOCK] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_request_event,
		[DEV_EVENT_INTERRUPT]	= ccw_device_request_event,
		[DEV_EVENT_TIMEOUT]	= ccw_device_request_event,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
};

EXPORT_SYMBOL_GPL(ccw_device_set_timeout);