/*
 *  drivers/s390/cio/cio.c
 *   S/390 common I/O routines -- low level i/o calls
 *
 *    Copyright IBM Corp. 1999, 2008
 *    Author(s): Ingo Adlung (adlung@de.ibm.com)
 *		 Cornelia Huck (cornelia.huck@de.ibm.com)
 *		 Arnd Bergmann (arndb@de.ibm.com)
 *		 Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <asm/cio.h>
#include <asm/delay.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/setup.h>
#include <asm/reset.h>
#include <asm/ipl.h>
#include <asm/chpid.h>
#include <asm/airq.h>
#include <asm/isc.h>
#include <asm/cputime.h>
#include <asm/fcx.h>
#include <asm/nmi.h>
#include <asm/crw.h>

#include "cio.h"
#include "css.h"
#include "chsc.h"
#include "ioasm.h"
#include "io_sch.h"
#include "blacklist.h"
#include "cio_debug.h"
#include "chp.h"

debug_info_t *cio_debug_msg_id;
debug_info_t *cio_debug_trace_id;
debug_info_t *cio_debug_crw_id;

/*
 * Function: cio_debug_init
 *  Initializes three debug logs for common I/O:
 *  - cio_msg logs generic cio messages
 *  - cio_trace logs the calling of different functions
 *  - cio_crw logs machine check related cio messages
 */
static int __init cio_debug_init(void)
{
	cio_debug_msg_id = debug_register("cio_msg", 16, 1, 16 * sizeof(long));
	if (!cio_debug_msg_id)
		goto out_unregister;
	debug_register_view(cio_debug_msg_id, &debug_sprintf_view);
	debug_set_level(cio_debug_msg_id, 2);
	cio_debug_trace_id = debug_register("cio_trace", 16, 1, 16);
	if (!cio_debug_trace_id)
		goto out_unregister;
	debug_register_view(cio_debug_trace_id, &debug_hex_ascii_view);
	debug_set_level(cio_debug_trace_id, 2);
	cio_debug_crw_id = debug_register("cio_crw", 16, 1, 16 * sizeof(long));
	if (!cio_debug_crw_id)
		goto out_unregister;
	debug_register_view(cio_debug_crw_id, &debug_sprintf_view);
	debug_set_level(cio_debug_crw_id, 4);
	return 0;

out_unregister:
	if (cio_debug_msg_id)
		debug_unregister(cio_debug_msg_id);
	if (cio_debug_trace_id)
		debug_unregister(cio_debug_trace_id);
	if (cio_debug_crw_id)
		debug_unregister(cio_debug_crw_id);
	return -1;
}

arch_initcall(cio_debug_init);
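
/*
 * Set the per-subchannel I/O options (suspend, prefetch suppression,
 * interruption suppression) that cio_start_key() later copies into the ORB.
 */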
int cio_set_options(struct subchannel *sch, int flags)
{
	struct io_subchannel_private *priv = to_io_private(sch);

	priv->options.suspend = (flags & DOIO_ALLOW_SUSPEND) != 0;
	priv->options.prefetch = (flags & DOIO_DENY_PREFETCH) != 0;
	priv->options.inter = (flags & DOIO_SUPPRESS_INTER) != 0;
	return 0;
}
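
/*
 * Handle a "not operational" condition code from ssch: remove the failing
 * path(s) from the subchannel's lpm, refresh the schib and return -EACCES
 * if other paths remain, -ENODEV otherwise.
 */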
static int
cio_start_handle_notoper(struct subchannel *sch, __u8 lpm)
{
	char dbf_text[15];

	if (lpm != 0)
		sch->lpm &= ~lpm;
	else
		sch->lpm = 0;

	CIO_MSG_EVENT(2, "cio_start: 'not oper' status for "
		      "subchannel 0.%x.%04x!\n", sch->schid.ssid,
		      sch->schid.sch_no);
	if (cio_update_schib(sch))
		return -ENODEV;
	sprintf(dbf_text, "no%s", dev_name(&sch->dev));
	CIO_TRACE_EVENT(0, dbf_text);
	CIO_HEX_EVENT(0, &sch->schib, sizeof(struct schib));

	return (sch->lpm ? -EACCES : -ENODEV);
}

int
cio_start_key(struct subchannel *sch,	/* subchannel structure */
	      struct ccw1 *cpa,		/* logical channel prog addr */
	      __u8 lpm,			/* logical path mask */
	      __u8 key)			/* storage key */
{
	struct io_subchannel_private *priv = to_io_private(sch);
	union orb *orb = &priv->orb;
	int ccode;

	CIO_TRACE_EVENT(5, "stIO");
	CIO_TRACE_EVENT(5, dev_name(&sch->dev));

	memset(orb, 0, sizeof(union orb));
	/* sch is always under 2G. */
	orb->cmd.intparm = (u32)(addr_t)sch;
	orb->cmd.fmt = 1;

	orb->cmd.pfch = priv->options.prefetch == 0;
	orb->cmd.spnd = priv->options.suspend;
	orb->cmd.ssic = priv->options.suspend && priv->options.inter;
	orb->cmd.lpm = (lpm != 0) ? lpm : sch->lpm;
#ifdef CONFIG_64BIT
	/*
	 * for 64 bit we always support 64 bit IDAWs with 4k page size only
	 */
	orb->cmd.c64 = 1;
	orb->cmd.i2k = 0;
#endif
	orb->cmd.key = key >> 4;
	/* issue "Start Subchannel" */
	orb->cmd.cpa = (__u32)__pa(cpa);
	ccode = ssch(sch->schid, orb);

	/* process condition code */
	CIO_HEX_EVENT(5, &ccode, sizeof(ccode));

	switch (ccode) {
	case 0:
		/*
		 * initialize device status information
		 */
		sch->schib.scsw.cmd.actl |= SCSW_ACTL_START_PEND;
		return 0;
	case 1:		/* status pending */
	case 2:		/* busy */
		return -EBUSY;
	case 3:		/* device/path not operational */
		return cio_start_handle_notoper(sch, lpm);
	default:
		return ccode;
	}
}

int
cio_start(struct subchannel *sch, struct ccw1 *cpa, __u8 lpm)
{
	return cio_start_key(sch, cpa, lpm, PAGE_DEFAULT_KEY);
}

/*
 * resume suspended I/O operation
 */
int
cio_resume(struct subchannel *sch)
{
	int ccode;

	CIO_TRACE_EVENT(4, "resIO");
	CIO_TRACE_EVENT(4, dev_name(&sch->dev));

	ccode = rsch(sch->schid);

	CIO_HEX_EVENT(4, &ccode, sizeof(ccode));

	switch (ccode) {
	case 0:
		sch->schib.scsw.cmd.actl |= SCSW_ACTL_RESUME_PEND;
		return 0;
	case 1:
		return -EBUSY;
	case 2:
		return -EINVAL;
	default:
		/*
		 * useless to wait for request completion
		 * as device is no longer operational!
		 */
		return -ENODEV;
	}
}

/*
 * halt I/O operation
 */
int
cio_halt(struct subchannel *sch)
{
	int ccode;

	if (!sch)
		return -ENODEV;

	CIO_TRACE_EVENT(2, "haltIO");
	CIO_TRACE_EVENT(2, dev_name(&sch->dev));

	/*
	 * Issue "Halt subchannel" and process condition code
	 */
	ccode = hsch(sch->schid);

	CIO_HEX_EVENT(2, &ccode, sizeof(ccode));

	switch (ccode) {
	case 0:
		sch->schib.scsw.cmd.actl |= SCSW_ACTL_HALT_PEND;
		return 0;
	case 1:		/* status pending */
	case 2:		/* busy */
		return -EBUSY;
	default:	/* device not operational */
		return -ENODEV;
	}
}

/*
 * Clear I/O operation
 */
int
cio_clear(struct subchannel *sch)
{
	int ccode;

	if (!sch)
		return -ENODEV;

	CIO_TRACE_EVENT(2, "clearIO");
	CIO_TRACE_EVENT(2, dev_name(&sch->dev));

	/*
	 * Issue "Clear subchannel" and process condition code
	 */
	ccode = csch(sch->schid);

	CIO_HEX_EVENT(2, &ccode, sizeof(ccode));

	switch (ccode) {
	case 0:
		sch->schib.scsw.cmd.actl |= SCSW_ACTL_CLEAR_PEND;
		return 0;
	default:	/* device not operational */
		return -ENODEV;
	}
}

/*
 * Function: cio_cancel
 * Issues a "Cancel Subchannel" on the specified subchannel
 * Note: We don't need any fancy intparms and flags here
 *	 since xsch is executed synchronously.
 * Only for common I/O internal use as for now.
 */
int
cio_cancel(struct subchannel *sch)
{
	int ccode;

	if (!sch)
		return -ENODEV;

	CIO_TRACE_EVENT(2, "cancelIO");
	CIO_TRACE_EVENT(2, dev_name(&sch->dev));

	ccode = xsch(sch->schid);

	CIO_HEX_EVENT(2, &ccode, sizeof(ccode));

	switch (ccode) {
	case 0:		/* success */
		/* Update information in scsw. */
		if (cio_update_schib(sch))
			return -ENODEV;
		return 0;
	case 1:		/* status pending */
		return -EBUSY;
	case 2:		/* not applicable */
		return -EINVAL;
	default:	/* not oper */
		return -ENODEV;
	}
}
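
/*
 * Helpers for cio_commit_config(): copy the desired settings from
 * sch->config into a local schib, and check whether a subsequent msch
 * actually made them effective.
 */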
static void cio_apply_config(struct subchannel *sch, struct schib *schib)
{
	schib->pmcw.intparm = sch->config.intparm;
	schib->pmcw.mbi = sch->config.mbi;
	schib->pmcw.isc = sch->config.isc;
	schib->pmcw.ena = sch->config.ena;
	schib->pmcw.mme = sch->config.mme;
	schib->pmcw.mp = sch->config.mp;
	schib->pmcw.csense = sch->config.csense;
	schib->pmcw.mbfc = sch->config.mbfc;
	if (sch->config.mbfc)
		schib->mba = sch->config.mba;
}

static int cio_check_config(struct subchannel *sch, struct schib *schib)
{
	return (schib->pmcw.intparm == sch->config.intparm) &&
		(schib->pmcw.mbi == sch->config.mbi) &&
		(schib->pmcw.isc == sch->config.isc) &&
		(schib->pmcw.ena == sch->config.ena) &&
		(schib->pmcw.mme == sch->config.mme) &&
		(schib->pmcw.mp == sch->config.mp) &&
		(schib->pmcw.csense == sch->config.csense) &&
		(schib->pmcw.mbfc == sch->config.mbfc) &&
		(!sch->config.mbfc || (schib->mba == sch->config.mba));
}

/*
 * cio_commit_config - apply configuration to the subchannel
 */
int cio_commit_config(struct subchannel *sch)
{
	struct schib schib;
	int ccode, retry, ret = 0;

	if (stsch_err(sch->schid, &schib) || !css_sch_is_valid(&schib))
		return -ENODEV;

	for (retry = 0; retry < 5; retry++) {
		/* copy desired changes to local schib */
		cio_apply_config(sch, &schib);
		ccode = msch_err(sch->schid, &schib);
		if (ccode < 0) /* -EIO if msch gets a program check. */
			return ccode;
		switch (ccode) {
		case 0: /* successful */
			if (stsch_err(sch->schid, &schib) ||
			    !css_sch_is_valid(&schib))
				return -ENODEV;
			if (cio_check_config(sch, &schib)) {
				/* commit changes from local schib */
				memcpy(&sch->schib, &schib, sizeof(schib));
				return 0;
			}
			ret = -EAGAIN;
			break;
		case 1: /* status pending */
			return -EBUSY;
		case 2: /* busy */
			udelay(100); /* allow for recovery */
			ret = -EBUSY;
			break;
		case 3: /* not operational */
			return -ENODEV;
		}
	}
	return ret;
}

/**
 * cio_update_schib - Perform stsch and update schib if subchannel is valid.
 * @sch: subchannel on which to perform stsch
 * Return zero on success, -ENODEV otherwise.
 */
int cio_update_schib(struct subchannel *sch)
{
	struct schib schib;

	if (stsch_err(sch->schid, &schib) || !css_sch_is_valid(&schib))
		return -ENODEV;

	memcpy(&sch->schib, &schib, sizeof(schib));
	return 0;
}
EXPORT_SYMBOL_GPL(cio_update_schib);

/**
 * cio_enable_subchannel - enable a subchannel.
 * @sch: subchannel to be enabled
 * @intparm: interruption parameter to set
 */
int cio_enable_subchannel(struct subchannel *sch, u32 intparm)
{
	int retry;
	int ret;

	CIO_TRACE_EVENT(2, "ensch");
	CIO_TRACE_EVENT(2, dev_name(&sch->dev));

	if (sch_is_pseudo_sch(sch))
		return -EINVAL;
	if (cio_update_schib(sch))
		return -ENODEV;

	sch->config.ena = 1;
	sch->config.isc = sch->isc;
	sch->config.intparm = intparm;
	for (retry = 0; retry < 3; retry++) {
		ret = cio_commit_config(sch);
		if (ret == -EIO) {
			/*
			 * Got a program check in msch. Try without
			 * the concurrent sense bit the next time.
			 */
			sch->config.csense = 0;
		} else if (ret == -EBUSY) {
			struct irb irb;
			if (tsch(sch->schid, &irb) != 0)
				break;
		} else
			break;
	}
	CIO_HEX_EVENT(2, &ret, sizeof(ret));
	return ret;
}
EXPORT_SYMBOL_GPL(cio_enable_subchannel);

/**
 * cio_disable_subchannel - disable a subchannel.
 * @sch: subchannel to disable
 */
int cio_disable_subchannel(struct subchannel *sch)
{
	int retry;
	int ret;

	CIO_TRACE_EVENT(2, "dissch");
	CIO_TRACE_EVENT(2, dev_name(&sch->dev));

	if (sch_is_pseudo_sch(sch))
		return 0;
	if (cio_update_schib(sch))
		return -ENODEV;

	sch->config.ena = 0;
	for (retry = 0; retry < 3; retry++) {
		ret = cio_commit_config(sch);
		if (ret == -EBUSY) {
			struct irb irb;
			if (tsch(sch->schid, &irb) != 0)
				break;
		} else
			break;
	}
	CIO_HEX_EVENT(2, &ret, sizeof(ret));
	return ret;
}
EXPORT_SYMBOL_GPL(cio_disable_subchannel);
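
/* Allocate and initialize the spinlock used to serialize access to @sch. */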
int cio_create_sch_lock(struct subchannel *sch)
{
	sch->lock = kmalloc(sizeof(spinlock_t), GFP_KERNEL);
	if (!sch->lock)
		return -ENOMEM;
	spin_lock_init(sch->lock);
	return 0;
}

static int cio_check_devno_blacklisted(struct subchannel *sch)
{
	if (is_blacklisted(sch->schid.ssid, sch->schib.pmcw.dev)) {
		/*
		 * This device must not be known to Linux. So we simply
		 * say that there is no device and return ENODEV.
		 */
		CIO_MSG_EVENT(6, "Blacklisted device detected "
			      "at devno %04X, subchannel set %x\n",
			      sch->schib.pmcw.dev, sch->schid.ssid);
		return -ENODEV;
	}
	return 0;
}

static int cio_validate_io_subchannel(struct subchannel *sch)
{
	/* Initialization for io subchannels. */
	if (!css_sch_is_valid(&sch->schib))
		return -ENODEV;

	/* Devno is valid. */
	return cio_check_devno_blacklisted(sch);
}

static int cio_validate_msg_subchannel(struct subchannel *sch)
{
	/* Initialization for message subchannels. */
	if (!css_sch_is_valid(&sch->schib))
		return -ENODEV;

	/* Devno is valid. */
	return cio_check_devno_blacklisted(sch);
}

/**
 * cio_validate_subchannel - basic validation of subchannel
 * @sch: subchannel structure to be filled out
 * @schid: subchannel id
 *
 * Find out subchannel type and initialize struct subchannel.
 * Return codes:
 *   0 on success
 *   -ENXIO for non-defined subchannels
 *   -ENODEV for invalid subchannels or blacklisted devices
 *   -EIO for subchannels in an invalid subchannel set
 */
int cio_validate_subchannel(struct subchannel *sch, struct subchannel_id schid)
{
	char dbf_txt[15];
	int ccode;
	int err;

	sprintf(dbf_txt, "valsch%x", schid.sch_no);
	CIO_TRACE_EVENT(4, dbf_txt);

	/* Nuke all fields. */
	memset(sch, 0, sizeof(struct subchannel));

	sch->schid = schid;
	if (cio_is_console(schid)) {
		sch->lock = cio_get_console_lock();
	} else {
		err = cio_create_sch_lock(sch);
		if (err)
			goto out;
	}
	mutex_init(&sch->reg_mutex);

	/*
	 * The first subchannel that is not-operational (ccode==3)
	 * indicates that there aren't any more devices available.
	 * If stsch gets an exception, it means the current subchannel set
	 * is not valid.
	 */
	ccode = stsch_err(schid, &sch->schib);
	if (ccode) {
		err = (ccode == 3) ? -ENXIO : ccode;
		goto out;
	}
	/* Copy subchannel type from path management control word. */
	sch->st = sch->schib.pmcw.st;

	switch (sch->st) {
	case SUBCHANNEL_TYPE_IO:
		err = cio_validate_io_subchannel(sch);
		break;
	case SUBCHANNEL_TYPE_MSG:
		err = cio_validate_msg_subchannel(sch);
		break;
	default:
		err = 0;
	}
	if (err)
		goto out;

	CIO_MSG_EVENT(4, "Subchannel 0.%x.%04x reports subchannel type %04X\n",
		      sch->schid.ssid, sch->schid.sch_no, sch->st);
	return 0;
out:
	if (!cio_is_console(schid))
		kfree(sch->lock);
	sch->lock = NULL;
	return err;
}

/*
 * do_IRQ() handles all normal I/O device IRQ's (the special
 *	    SMP cross-CPU interrupts have their own specific
 *	    handlers).
 *
 */
void __irq_entry do_IRQ(struct pt_regs *regs)
{
	struct tpi_info *tpi_info;
	struct subchannel *sch;
	struct irb *irb;
	struct pt_regs *old_regs;

	old_regs = set_irq_regs(regs);
	irq_enter();
	__this_cpu_write(s390_idle.nohz_delay, 1);
	if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
		/* Serve timer interrupts first. */
		clock_comparator_work();
	/*
	 * Get interrupt information from lowcore
	 */
	tpi_info = (struct tpi_info *)&S390_lowcore.subchannel_id;
	irb = (struct irb *)&S390_lowcore.irb;
	do {
		kstat_cpu(smp_processor_id()).irqs[IO_INTERRUPT]++;
		if (tpi_info->adapter_IO) {
			do_adapter_IO(tpi_info->isc);
			continue;
		}
		sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
		if (!sch) {
			/* Clear pending interrupt condition. */
			kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
			tsch(tpi_info->schid, irb);
			continue;
		}
		spin_lock(sch->lock);
		/* Store interrupt response block to lowcore. */
		if (tsch(tpi_info->schid, irb) == 0) {
			/* Keep subchannel information word up to date. */
			memcpy(&sch->schib.scsw, &irb->scsw,
			       sizeof(irb->scsw));
			/* Call interrupt handler if there is one. */
			if (sch->driver && sch->driver->irq)
				sch->driver->irq(sch);
			else
				kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
		} else
			kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
		spin_unlock(sch->lock);
		/*
		 * Are more interrupts pending?
		 * If so, the tpi instruction will update the lowcore
		 * to hold the info for the next interrupt.
		 * We don't do this for VM because a tpi drops the cpu
		 * out of the sie which costs more cycles than it saves.
		 */
	} while (MACHINE_IS_LPAR && tpi(NULL) != 0);
	irq_exit();
	set_irq_regs(old_regs);
}

#ifdef CONFIG_CCW_CONSOLE
static struct subchannel console_subchannel;
static struct io_subchannel_private console_priv;
static int console_subchannel_in_use;

/*
 * Use cio_tpi to get a pending interrupt and call the interrupt handler.
 * Return non-zero if an interrupt was processed, zero otherwise.
 */
static int cio_tpi(void)
{
	struct tpi_info *tpi_info;
	struct subchannel *sch;
	struct irb *irb;
	int irq_context;

	tpi_info = (struct tpi_info *)&S390_lowcore.subchannel_id;
	if (tpi(NULL) != 1)
		return 0;
	kstat_cpu(smp_processor_id()).irqs[IO_INTERRUPT]++;
	if (tpi_info->adapter_IO) {
		do_adapter_IO(tpi_info->isc);
		return 1;
	}
	irb = (struct irb *)&S390_lowcore.irb;
	/* Store interrupt response block to lowcore. */
	if (tsch(tpi_info->schid, irb) != 0) {
		/* Not status pending or not operational. */
		kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
		return 1;
	}
	sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
	if (!sch) {
		kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
		return 1;
	}
	irq_context = in_interrupt();
	if (!irq_context)
		local_bh_disable();
	irq_enter();
	spin_lock(sch->lock);
	memcpy(&sch->schib.scsw, &irb->scsw, sizeof(union scsw));
	if (sch->driver && sch->driver->irq)
		sch->driver->irq(sch);
	else
		kstat_cpu(smp_processor_id()).irqs[IOINT_CIO]++;
	spin_unlock(sch->lock);
	irq_exit();
	if (!irq_context)
		_local_bh_enable();
	return 1;
}

void *cio_get_console_priv(void)
{
	return &console_priv;
}

/*
 * busy wait for the next interrupt on the console
 */
void wait_cons_dev(void)
	__releases(console_subchannel.lock)
	__acquires(console_subchannel.lock)
{
	unsigned long cr6      __attribute__ ((aligned(8)));
	unsigned long save_cr6 __attribute__ ((aligned(8)));

	/*
	 * before entering the spinlock we may already have
	 * processed the interrupt on a different CPU...
	 */
	if (!console_subchannel_in_use)
		return;

	/* disable all but the console isc */
	__ctl_store(save_cr6, 6, 6);
	cr6 = 1UL << (31 - CONSOLE_ISC);
	__ctl_load(cr6, 6, 6);

	do {
		spin_unlock(console_subchannel.lock);
		if (!cio_tpi())
			cpu_relax();
		spin_lock(console_subchannel.lock);
	} while (console_subchannel.schib.scsw.cmd.actl != 0);
	/*
	 * restore previous isc value
	 */
	__ctl_load(save_cr6, 6, 6);
}
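
/*
 * for_each_subchannel() callback: note the subchannel number of the
 * I/O subchannel whose device number matches console_devno.
 */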
static int
cio_test_for_console(struct subchannel_id schid, void *data)
{
	if (stsch_err(schid, &console_subchannel.schib) != 0)
		return -ENXIO;
	if ((console_subchannel.schib.pmcw.st == SUBCHANNEL_TYPE_IO) &&
	    console_subchannel.schib.pmcw.dnv &&
	    (console_subchannel.schib.pmcw.dev == console_devno)) {
		console_irq = schid.sch_no;
		return 1; /* found */
	}
	return 0;
}

static int
cio_get_console_sch_no(void)
{
	struct subchannel_id schid;

	init_subchannel_id(&schid);
	if (console_irq != -1) {
		/* VM provided us with the irq number of the console. */
		schid.sch_no = console_irq;
		if (stsch_err(schid, &console_subchannel.schib) != 0 ||
		    (console_subchannel.schib.pmcw.st != SUBCHANNEL_TYPE_IO) ||
		    !console_subchannel.schib.pmcw.dnv)
			return -1;
		console_devno = console_subchannel.schib.pmcw.dev;
	} else if (console_devno != -1) {
		/* At least the console device number is known. */
		for_each_subchannel(cio_test_for_console, NULL);
		if (console_irq == -1)
			return -1;
	} else {
		/* unlike in 2.4, we cannot autoprobe here, since
		 * the channel subsystem is not fully initialized.
		 * With some luck, the HWC console can take over */
		return -1;
	}
	return console_irq;
}

struct subchannel *
cio_probe_console(void)
{
	int sch_no, ret;
	struct subchannel_id schid;

	if (xchg(&console_subchannel_in_use, 1) != 0)
		return ERR_PTR(-EBUSY);
	sch_no = cio_get_console_sch_no();
	if (sch_no == -1) {
		console_subchannel_in_use = 0;
		pr_warning("No CCW console was found\n");
		return ERR_PTR(-ENODEV);
	}
	memset(&console_subchannel, 0, sizeof(struct subchannel));
	init_subchannel_id(&schid);
	schid.sch_no = sch_no;
	ret = cio_validate_subchannel(&console_subchannel, schid);
	if (ret) {
		console_subchannel_in_use = 0;
		return ERR_PTR(-ENODEV);
	}

	/*
	 * enable console I/O-interrupt subclass
	 */
	isc_register(CONSOLE_ISC);
	console_subchannel.config.isc = CONSOLE_ISC;
	console_subchannel.config.intparm = (u32)(addr_t)&console_subchannel;
	ret = cio_commit_config(&console_subchannel);
	if (ret) {
		isc_unregister(CONSOLE_ISC);
		console_subchannel_in_use = 0;
		return ERR_PTR(ret);
	}
	return &console_subchannel;
}

void
cio_release_console(void)
{
	console_subchannel.config.intparm = 0;
	cio_commit_config(&console_subchannel);
	isc_unregister(CONSOLE_ISC);
	console_subchannel_in_use = 0;
}

/* Bah... hack to catch console special sausages. */
int
cio_is_console(struct subchannel_id schid)
{
	if (!console_subchannel_in_use)
		return 0;
	return schid_equal(&schid, &console_subchannel.schid);
}

struct subchannel *
cio_get_console_subchannel(void)
{
	if (!console_subchannel_in_use)
		return NULL;
	return &console_subchannel;
}

#endif
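
/*
 * Minimal disable helper used on the reset path: clear the enable bit
 * with a few msch retries, operating on raw schid/schib without going
 * through the driver model.
 */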
static int
__disable_subchannel_easy(struct subchannel_id schid, struct schib *schib)
{
	int retry, cc;

	cc = 0;
	for (retry = 0; retry < 3; retry++) {
		schib->pmcw.ena = 0;
		cc = msch_err(schid, schib);
		if (cc)
			return (cc == 3 ? -ENODEV : -EBUSY);
		if (stsch_err(schid, schib) || !css_sch_is_valid(schib))
			return -ENODEV;
		if (!schib->pmcw.ena)
			return 0;
	}
	return -EBUSY; /* uhm... */
}

static int
__clear_io_subchannel_easy(struct subchannel_id schid)
{
	int retry;

	if (csch(schid))
		return -ENODEV;
	for (retry = 0; retry < 20; retry++) {
		struct tpi_info ti;

		if (tpi(&ti)) {
			tsch(ti.schid, (struct irb *)&S390_lowcore.irb);
			if (schid_equal(&ti.schid, &schid))
				return 0;
		}
		udelay_simple(100);
	}
	return -EBUSY;
}

static void __clear_chsc_subchannel_easy(void)
{
	/* It seems we can only wait for a bit here :/ */
	udelay_simple(100);
}
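
/*
 * stsch variant for the reset path: install a temporary program check
 * handler so that a program check raised by stsch is reported as -EIO.
 */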
static int pgm_check_occured;

static void cio_reset_pgm_check_handler(void)
{
	pgm_check_occured = 1;
}

static int stsch_reset(struct subchannel_id schid, struct schib *addr)
{
	int rc;

	pgm_check_occured = 0;
	s390_base_pgm_handler_fn = cio_reset_pgm_check_handler;
	rc = stsch_err(schid, addr);
	s390_base_pgm_handler_fn = NULL;

	/* The program check handler could have changed pgm_check_occured. */
	barrier();

	if (pgm_check_occured)
		return -EIO;
	else
		return rc;
}

static int __shutdown_subchannel_easy(struct subchannel_id schid, void *data)
{
	struct schib schib;

	if (stsch_reset(schid, &schib))
		return -ENXIO;
	if (!schib.pmcw.ena)
		return 0;
	switch (__disable_subchannel_easy(schid, &schib)) {
	case 0:
	case -ENODEV:
		break;
	default: /* -EBUSY */
		switch (schib.pmcw.st) {
		case SUBCHANNEL_TYPE_IO:
			if (__clear_io_subchannel_easy(schid))
				goto out; /* give up... */
			break;
		case SUBCHANNEL_TYPE_CHSC:
			__clear_chsc_subchannel_easy();
			break;
		default:
			/* No default clear strategy */
			break;
		}
		stsch_err(schid, &schib);
		__disable_subchannel_easy(schid, &schib);
	}
out:
	return 0;
}
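
/*
 * Channel path reset bookkeeping: css_reset() below issues an rchp for
 * every chpid and increments this counter; the machine check handler
 * decrements it as the corresponding channel report words arrive.
 */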
static atomic_t chpid_reset_count;

static void s390_reset_chpids_mcck_handler(void)
{
	struct crw crw;
	struct mci *mci;

	/* Check for pending channel report word. */
	mci = (struct mci *)&S390_lowcore.mcck_interruption_code;
	if (!mci->cp)
		return;
	/* Process channel report words. */
	while (stcrw(&crw) == 0) {
		/* Check for responses to RCHP. */
		if (crw.slct && crw.rsc == CRW_RSC_CPATH)
			atomic_dec(&chpid_reset_count);
	}
}

#define RCHP_TIMEOUT (30 * USEC_PER_SEC)
static void css_reset(void)
{
	int i, ret;
	unsigned long long timeout;
	struct chp_id chpid;

	/* Reset subchannels. */
	for_each_subchannel(__shutdown_subchannel_easy, NULL);
	/* Reset channel paths. */
	s390_base_mcck_handler_fn = s390_reset_chpids_mcck_handler;
	/* Enable channel report machine checks. */
	__ctl_set_bit(14, 28);
	/* Temporarily reenable machine checks. */
	local_mcck_enable();
	chp_id_init(&chpid);
	for (i = 0; i <= __MAX_CHPID; i++) {
		chpid.id = i;
		ret = rchp(chpid);
		if ((ret == 0) || (ret == 2))
			/*
			 * rchp either succeeded, or another rchp is already
			 * in progress. In either case, we'll get a crw.
			 */
			atomic_inc(&chpid_reset_count);
	}
	/* Wait for machine check for all channel paths. */
	timeout = get_clock() + (RCHP_TIMEOUT << 12);
	while (atomic_read(&chpid_reset_count) != 0) {
		if (get_clock() > timeout)
			break;
		cpu_relax();
	}
	/* Disable machine checks again. */
	local_mcck_disable();
	/* Disable channel report machine checks. */
	__ctl_clear_bit(14, 28);
	s390_base_mcck_handler_fn = NULL;
}

static struct reset_call css_reset_call = {
	.fn = css_reset,
};

static int __init init_css_reset_call(void)
{
	atomic_set(&chpid_reset_count, 0);
	register_reset_call(&css_reset_call);
	return 0;
}

arch_initcall(init_css_reset_call);
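
/*
 * Translate a ccw device id into the matching subchannel id by scanning
 * all subchannels; used by reipl_ccw_dev() below.
 */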
struct sch_match_id {
	struct subchannel_id schid;
	struct ccw_dev_id devid;
	int rc;
};

static int __reipl_subchannel_match(struct subchannel_id schid, void *data)
{
	struct schib schib;
	struct sch_match_id *match_id = data;

	if (stsch_reset(schid, &schib))
		return -ENXIO;
	if ((schib.pmcw.st == SUBCHANNEL_TYPE_IO) && schib.pmcw.dnv &&
	    (schib.pmcw.dev == match_id->devid.devno) &&
	    (schid.ssid == match_id->devid.ssid)) {
		match_id->schid = schid;
		match_id->rc = 0;
		return 1;
	}
	return 0;
}

static int reipl_find_schid(struct ccw_dev_id *devid,
			    struct subchannel_id *schid)
{
	struct sch_match_id match_id;

	match_id.devid = *devid;
	match_id.rc = -ENODEV;
	for_each_subchannel(__reipl_subchannel_match, &match_id);
	if (match_id.rc == 0)
		*schid = match_id.schid;
	return match_id.rc;
}

extern void do_reipl_asm(__u32 schid);

/* Make sure all subchannels are quiet before we re-ipl an lpar. */
void reipl_ccw_dev(struct ccw_dev_id *devid)
{
	struct subchannel_id schid;

	s390_reset_system(NULL, NULL);
	if (reipl_find_schid(devid, &schid) != 0)
		panic("IPL Device not found\n");
	do_reipl_asm(*((__u32 *)&schid));
}
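
/*
 * Extract device number and QDIO flag of the IPL device from the
 * subchannel id stored in the lowcore.
 */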
int __init cio_get_iplinfo(struct cio_iplinfo *iplinfo)
{
	struct subchannel_id schid;
	struct schib schib;

	schid = *(struct subchannel_id *)&S390_lowcore.subchannel_id;
	if (!schid.one)
		return -ENODEV;
	if (stsch_err(schid, &schib))
		return -ENODEV;
	if (schib.pmcw.st != SUBCHANNEL_TYPE_IO)
		return -ENODEV;
	if (!schib.pmcw.dnv)
		return -ENODEV;
	iplinfo->devno = schib.pmcw.dev;
	iplinfo->is_qdio = schib.pmcw.qf;
	return 0;
}

/**
 * cio_tm_start_key - perform start function
 * @sch: subchannel on which to perform the start function
 * @tcw: transport-command word to be started
 * @lpm: mask of paths to use
 * @key: storage key to use for storage access
 *
 * Start the tcw on the given subchannel. Return zero on success, non-zero
 * otherwise.
 */
int cio_tm_start_key(struct subchannel *sch, struct tcw *tcw, u8 lpm, u8 key)
{
	int cc;
	union orb *orb = &to_io_private(sch)->orb;

	memset(orb, 0, sizeof(union orb));
	orb->tm.intparm = (u32)(addr_t)sch;
	orb->tm.key = key >> 4;
	orb->tm.b = 1;
	orb->tm.lpm = lpm ? lpm : sch->lpm;
	orb->tm.tcw = (u32)(addr_t)tcw;
	cc = ssch(sch->schid, orb);
	switch (cc) {
	case 0:
		return 0;
	case 1:
	case 2:
		return -EBUSY;
	default:
		return cio_start_handle_notoper(sch, lpm);
	}
}

/**
 * cio_tm_intrg - perform interrogate function
 * @sch: subchannel on which to perform the interrogate function
 *
 * If the specified subchannel is running in transport-mode, perform the
 * interrogate function. Return zero on success, non-zero otherwise.
 */
int cio_tm_intrg(struct subchannel *sch)
{
	int cc;

	if (!to_io_private(sch)->orb.tm.b)
		return -EINVAL;
	cc = xsch(sch->schid);
	switch (cc) {
	case 0:
	case 2:
		return 0;
	case 1:
		return -EBUSY;
	default:
		return -ENODEV;
	}
}