/*
 *  drivers/s390/cio/cio.c
 *   S/390 common I/O routines -- low level i/o calls
 *
 *    Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH,
 *                            IBM Corporation
 *    Author(s): Ingo Adlung (adlung@de.ibm.com)
 *               Cornelia Huck (cornelia.huck@de.ibm.com)
 *               Arnd Bergmann (arndb@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <asm/cio.h>
#include <asm/delay.h>
#include <asm/irq.h>
#include "airq.h"
#include "cio.h"
#include "css.h"
#include "chsc.h"
#include "ioasm.h"
#include "blacklist.h"
#include "cio_debug.h"
debug_info_t *cio_debug_msg_id;
debug_info_t *cio_debug_trace_id;
debug_info_t *cio_debug_crw_id;

int cio_show_msg;
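
/*
 * Parse the "cio_msg=" kernel parameter: "yes" enables and "no" disables
 * the printing of common I/O messages (cio_show_msg).
 */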
static int __init
cio_setup(char *parm)
{
        if (!strcmp(parm, "yes"))
                cio_show_msg = 1;
        else if (!strcmp(parm, "no"))
                cio_show_msg = 0;
        else
                printk(KERN_ERR "cio_setup: invalid cio_msg parameter '%s'",
                       parm);
        return 1;
}

__setup("cio_msg=", cio_setup);

/*
 * Function: cio_debug_init
 *  Initializes three debug logs (under /proc/s390dbf) for common I/O:
 *  - cio_msg logs the messages which are printk'ed when CONFIG_DEBUG_IO is on
 *  - cio_trace logs the calling of different functions
 *  - cio_crw logs the messages which are printk'ed when CONFIG_DEBUG_CRW is on
 *  debug levels depend on CONFIG_DEBUG_IO resp. CONFIG_DEBUG_CRW
 */
static int __init
cio_debug_init(void)
{
        cio_debug_msg_id = debug_register("cio_msg", 16, 4, 16 * sizeof(long));
        if (!cio_debug_msg_id)
                goto out_unregister;
        debug_register_view(cio_debug_msg_id, &debug_sprintf_view);
        debug_set_level(cio_debug_msg_id, 2);
        cio_debug_trace_id = debug_register("cio_trace", 16, 4, 16);
        if (!cio_debug_trace_id)
                goto out_unregister;
        debug_register_view(cio_debug_trace_id, &debug_hex_ascii_view);
        debug_set_level(cio_debug_trace_id, 2);
        cio_debug_crw_id = debug_register("cio_crw", 4, 4, 16 * sizeof(long));
        if (!cio_debug_crw_id)
                goto out_unregister;
        debug_register_view(cio_debug_crw_id, &debug_sprintf_view);
        debug_set_level(cio_debug_crw_id, 2);
        pr_debug("debugging initialized\n");
        return 0;

out_unregister:
        if (cio_debug_msg_id)
                debug_unregister(cio_debug_msg_id);
        if (cio_debug_trace_id)
                debug_unregister(cio_debug_trace_id);
        if (cio_debug_crw_id)
                debug_unregister(cio_debug_crw_id);
        pr_debug("could not initialize debugging\n");
        return -1;
}

arch_initcall(cio_debug_init);
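
/*
 * Store the DOIO_* flag bits in the subchannel's option settings;
 * cio_start_key() translates them into the corresponding ORB fields.
 */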
int
cio_set_options(struct subchannel *sch, int flags)
{
        sch->options.suspend = (flags & DOIO_ALLOW_SUSPEND) != 0;
        sch->options.prefetch = (flags & DOIO_DENY_PREFETCH) != 0;
        sch->options.inter = (flags & DOIO_SUPPRESS_INTER) != 0;
        return 0;
}

/* FIXME: who wants to use this? */
int
cio_get_options(struct subchannel *sch)
{
        int flags;

        flags = 0;
        if (sch->options.suspend)
                flags |= DOIO_ALLOW_SUSPEND;
        if (sch->options.prefetch)
                flags |= DOIO_DENY_PREFETCH;
        if (sch->options.inter)
                flags |= DOIO_SUPPRESS_INTER;
        return flags;
}

/*
 * Use tpi to get a pending interrupt and call the interrupt handler of
 * the subchannel it belongs to. Returns 1 if an interrupt was pending,
 * 0 otherwise.
 */
static inline int
cio_tpi(void)
{
        struct tpi_info *tpi_info;
        struct subchannel *sch;
        struct irb *irb;

        tpi_info = (struct tpi_info *) __LC_SUBCHANNEL_ID;
        if (tpi(NULL) != 1)
                return 0;
        irb = (struct irb *) __LC_IRB;
        /* Store interrupt response block to lowcore. */
        if (tsch(tpi_info->schid, irb) != 0)
                /* Not status pending or not operational. */
                return 1;
        sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
        if (!sch)
                return 1;
        local_bh_disable();
        irq_enter();
        spin_lock(&sch->lock);
        memcpy(&sch->schib.scsw, &irb->scsw, sizeof(struct scsw));
        if (sch->driver && sch->driver->irq)
                sch->driver->irq(&sch->dev);
        spin_unlock(&sch->lock);
        irq_exit();
        _local_bh_enable();
        return 1;
}
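
/*
 * Handle a "not operational" condition code from ssch: remove the affected
 * path(s) from the subchannel's lpm, refresh the schib and log the event.
 * Returns -EACCES if usable paths remain, -ENODEV otherwise.
 */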
static inline int
cio_start_handle_notoper(struct subchannel *sch, __u8 lpm)
{
        char dbf_text[15];

        if (lpm != 0)
                sch->lpm &= ~lpm;
        else
                sch->lpm = 0;

        stsch(sch->schid, &sch->schib);

        CIO_MSG_EVENT(0, "cio_start: 'not oper' status for "
                      "subchannel 0.%x.%04x!\n", sch->schid.ssid,
                      sch->schid.sch_no);
        sprintf(dbf_text, "no%s", sch->dev.bus_id);
        CIO_TRACE_EVENT(0, dbf_text);
        CIO_HEX_EVENT(0, &sch->schib, sizeof(struct schib));

        return (sch->lpm ? -EACCES : -ENODEV);
}
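
/*
 * Set up the ORB for the given channel program and storage key and issue
 * "Start Subchannel". Returns 0 on success, -EBUSY if the subchannel is
 * status pending or busy, and the result of cio_start_handle_notoper()
 * if the device or path is not operational.
 */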
int
cio_start_key(struct subchannel *sch,   /* subchannel structure */
              struct ccw1 *cpa,         /* logical channel prog addr */
              __u8 lpm,                 /* logical path mask */
              __u8 key)                 /* storage key */
{
        char dbf_txt[15];
        int ccode;

        CIO_TRACE_EVENT(4, "stIO");
        CIO_TRACE_EVENT(4, sch->dev.bus_id);

        /* sch is always under 2G. */
        sch->orb.intparm = (__u32)(unsigned long)sch;
        sch->orb.fmt = 1;

        sch->orb.pfch = sch->options.prefetch == 0;
        sch->orb.spnd = sch->options.suspend;
        sch->orb.ssic = sch->options.suspend && sch->options.inter;
        sch->orb.lpm = (lpm != 0) ? (lpm & sch->opm) : sch->lpm;
#ifdef CONFIG_64BIT
        /*
         * for 64 bit we always support 64 bit IDAWs with 4k page size only
         */
        sch->orb.c64 = 1;
        sch->orb.i2k = 0;
#endif
        sch->orb.key = key >> 4;
        /* issue "Start Subchannel" */
        sch->orb.cpa = (__u32) __pa(cpa);
        ccode = ssch(sch->schid, &sch->orb);

        /* process condition code */
        sprintf(dbf_txt, "ccode:%d", ccode);
        CIO_TRACE_EVENT(4, dbf_txt);

        switch (ccode) {
        case 0:
                /*
                 * initialize device status information
                 */
                sch->schib.scsw.actl |= SCSW_ACTL_START_PEND;
                return 0;
        case 1:         /* status pending */
        case 2:         /* busy */
                return -EBUSY;
        default:        /* device/path not operational */
                return cio_start_handle_notoper(sch, lpm);
        }
}
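
/*
 * Start I/O as cio_start_key() does, but with the default storage key.
 */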
int
cio_start(struct subchannel *sch, struct ccw1 *cpa, __u8 lpm)
{
        return cio_start_key(sch, cpa, lpm, PAGE_DEFAULT_KEY);
}

/*
 * resume suspended I/O operation
 */
int
cio_resume(struct subchannel *sch)
{
        char dbf_txt[15];
        int ccode;

        CIO_TRACE_EVENT(4, "resIO");
        CIO_TRACE_EVENT(4, sch->dev.bus_id);

        ccode = rsch(sch->schid);

        sprintf(dbf_txt, "ccode:%d", ccode);
        CIO_TRACE_EVENT(4, dbf_txt);

        switch (ccode) {
        case 0:
                sch->schib.scsw.actl |= SCSW_ACTL_RESUME_PEND;
                return 0;
        case 1:
                return -EBUSY;
        case 2:
                return -EINVAL;
        default:
                /*
                 * useless to wait for request completion
                 * as device is no longer operational!
                 */
                return -ENODEV;
        }
}

/*
 * halt I/O operation
 */
int
cio_halt(struct subchannel *sch)
{
        char dbf_txt[15];
        int ccode;

        if (!sch)
                return -ENODEV;

        CIO_TRACE_EVENT(2, "haltIO");
        CIO_TRACE_EVENT(2, sch->dev.bus_id);

        /*
         * Issue "Halt subchannel" and process condition code
         */
        ccode = hsch(sch->schid);

        sprintf(dbf_txt, "ccode:%d", ccode);
        CIO_TRACE_EVENT(2, dbf_txt);

        switch (ccode) {
        case 0:
                sch->schib.scsw.actl |= SCSW_ACTL_HALT_PEND;
                return 0;
        case 1:         /* status pending */
        case 2:         /* busy */
                return -EBUSY;
        default:        /* device not operational */
                return -ENODEV;
        }
}

/*
 * Clear I/O operation
 */
int
cio_clear(struct subchannel *sch)
{
        char dbf_txt[15];
        int ccode;

        if (!sch)
                return -ENODEV;

        CIO_TRACE_EVENT(2, "clearIO");
        CIO_TRACE_EVENT(2, sch->dev.bus_id);

        /*
         * Issue "Clear subchannel" and process condition code
         */
        ccode = csch(sch->schid);

        sprintf(dbf_txt, "ccode:%d", ccode);
        CIO_TRACE_EVENT(2, dbf_txt);

        switch (ccode) {
        case 0:
                sch->schib.scsw.actl |= SCSW_ACTL_CLEAR_PEND;
                return 0;
        default:        /* device not operational */
                return -ENODEV;
        }
}

/*
 * Function: cio_cancel
 *  Issues a "Cancel Subchannel" on the specified subchannel
 *  Note: We don't need any fancy intparms and flags here
 *        since xsch is executed synchronously.
 *  Only for common I/O internal use as for now.
 */
int
cio_cancel(struct subchannel *sch)
{
        char dbf_txt[15];
        int ccode;

        if (!sch)
                return -ENODEV;

        CIO_TRACE_EVENT(2, "cancelIO");
        CIO_TRACE_EVENT(2, sch->dev.bus_id);

        ccode = xsch(sch->schid);

        sprintf(dbf_txt, "ccode:%d", ccode);
        CIO_TRACE_EVENT(2, dbf_txt);

        switch (ccode) {
        case 0:         /* success */
                /* Update information in scsw. */
                stsch(sch->schid, &sch->schib);
                return 0;
        case 1:         /* status pending */
                return -EBUSY;
        case 2:         /* not applicable */
                return -EINVAL;
        default:        /* not oper */
                return -ENODEV;
        }
}

/*
 * Function: cio_modify
 * Issues a "Modify Subchannel" on the specified subchannel
 */
int
cio_modify(struct subchannel *sch)
{
        int ccode, retry, ret;

        ret = 0;
        for (retry = 0; retry < 5; retry++) {
                ccode = msch_err(sch->schid, &sch->schib);
                if (ccode < 0)  /* -EIO if msch gets a program check. */
                        return ccode;
                switch (ccode) {
                case 0: /* successful */
                        return 0;
                case 1: /* status pending */
                        return -EBUSY;
                case 2: /* busy */
                        udelay(100);    /* allow for recovery */
                        ret = -EBUSY;
                        break;
                case 3: /* not operational */
                        return -ENODEV;
                }
        }
        return ret;
}

/*
 * Enable subchannel.
 */
int
cio_enable_subchannel(struct subchannel *sch, unsigned int isc)
{
        char dbf_txt[15];
        int ccode;
        int retry;
        int ret;

        CIO_TRACE_EVENT(2, "ensch");
        CIO_TRACE_EVENT(2, sch->dev.bus_id);

        ccode = stsch(sch->schid, &sch->schib);
        if (ccode)
                return -ENODEV;

        for (retry = 5, ret = 0; retry > 0; retry--) {
                sch->schib.pmcw.ena = 1;
                sch->schib.pmcw.isc = isc;
                sch->schib.pmcw.intparm = (__u32)(unsigned long)sch;
                ret = cio_modify(sch);
                if (ret == -ENODEV)
                        break;
                if (ret == -EIO)
                        /*
                         * Got a program check in cio_modify. Try without
                         * the concurrent sense bit the next time.
                         */
                        sch->schib.pmcw.csense = 0;
                if (ret == 0) {
                        stsch(sch->schid, &sch->schib);
                        if (sch->schib.pmcw.ena)
                                break;
                }
                if (ret == -EBUSY) {
                        struct irb irb;
                        if (tsch(sch->schid, &irb) != 0)
                                break;
                }
        }
        sprintf(dbf_txt, "ret:%d", ret);
        CIO_TRACE_EVENT(2, dbf_txt);
        return ret;
}

/*
 * Disable subchannel.
 */
int
cio_disable_subchannel(struct subchannel *sch)
{
        char dbf_txt[15];
        int ccode;
        int retry;
        int ret;

        CIO_TRACE_EVENT(2, "dissch");
        CIO_TRACE_EVENT(2, sch->dev.bus_id);

        ccode = stsch(sch->schid, &sch->schib);
        if (ccode == 3)         /* Not operational. */
                return -ENODEV;

        if (sch->schib.scsw.actl != 0)
                /*
                 * the disable function must not be called while there are
                 * requests pending for completion!
                 */
                return -EBUSY;

        for (retry = 5, ret = 0; retry > 0; retry--) {
                sch->schib.pmcw.ena = 0;
                ret = cio_modify(sch);
                if (ret == -ENODEV)
                        break;
                if (ret == -EBUSY)
                        /*
                         * The subchannel is busy or status pending.
                         * We'll disable when the next interrupt was delivered
                         * via the state machine.
                         */
                        break;
                if (ret == 0) {
                        stsch(sch->schid, &sch->schib);
                        if (!sch->schib.pmcw.ena)
                                break;
                }
        }
        sprintf(dbf_txt, "ret:%d", ret);
        CIO_TRACE_EVENT(2, dbf_txt);
        return ret;
}

/*
 * cio_validate_subchannel()
 *
 * Find out subchannel type and initialize struct subchannel.
 * Return codes:
 *   SUBCHANNEL_TYPE_IO for a normal io subchannel
 *   SUBCHANNEL_TYPE_CHSC for a chsc subchannel
 *   SUBCHANNEL_TYPE_MESSAGE for a messaging subchannel
 *   SUBCHANNEL_TYPE_ADM for a adm(?) subchannel
 *   -ENXIO for non-defined subchannels
 *   -ENODEV for subchannels with invalid device number or blacklisted devices
 */
int
cio_validate_subchannel(struct subchannel *sch, struct subchannel_id schid)
{
        char dbf_txt[15];
        int ccode;

        sprintf(dbf_txt, "valsch%x", schid.sch_no);
        CIO_TRACE_EVENT(4, dbf_txt);

        /* Nuke all fields. */
        memset(sch, 0, sizeof(struct subchannel));

        spin_lock_init(&sch->lock);

        /* Set a name for the subchannel */
        snprintf(sch->dev.bus_id, BUS_ID_SIZE, "0.%x.%04x", schid.ssid,
                 schid.sch_no);

        /*
         * The first subchannel that is not-operational (ccode==3)
         * indicates that there aren't any more devices available.
         * If stsch gets an exception, it means the current subchannel set
         * is not valid.
         */
        ccode = stsch_err(schid, &sch->schib);
        if (ccode)
                return (ccode == 3) ? -ENXIO : ccode;

        sch->schid = schid;
        /* Copy subchannel type from path management control word. */
        sch->st = sch->schib.pmcw.st;

        /*
         * ... just being curious we check for non I/O subchannels
         */
        if (sch->st != 0) {
                CIO_DEBUG(KERN_INFO, 0,
                          "Subchannel 0.%x.%04x reports "
                          "non-I/O subchannel type %04X\n",
                          sch->schid.ssid, sch->schid.sch_no, sch->st);
                /* We stop here for non-io subchannels. */
                return sch->st;
        }

        /* Initialization for io subchannels. */
        if (!sch->schib.pmcw.dnv)
                /* io subchannel but device number is invalid. */
                return -ENODEV;

        /* Devno is valid. */
        if (is_blacklisted(sch->schid.ssid, sch->schib.pmcw.dev)) {
                /*
                 * This device must not be known to Linux. So we simply
                 * say that there is no device and return ENODEV.
                 */
                CIO_MSG_EVENT(0, "Blacklisted device detected "
                              "at devno %04X, subchannel set %x\n",
                              sch->schib.pmcw.dev, sch->schid.ssid);
                return -ENODEV;
        }
        sch->opm = 0xff;
        if (!cio_is_console(sch->schid))
                chsc_validate_chpids(sch);
        sch->lpm = sch->schib.pmcw.pim &
                sch->schib.pmcw.pam &
                sch->schib.pmcw.pom &
                sch->opm;

        CIO_DEBUG(KERN_INFO, 0,
                  "Detected device %04x on subchannel 0.%x.%04X"
                  " - PIM = %02X, PAM = %02X, POM = %02X\n",
                  sch->schib.pmcw.dev, sch->schid.ssid,
                  sch->schid.sch_no, sch->schib.pmcw.pim,
                  sch->schib.pmcw.pam, sch->schib.pmcw.pom);

        /*
         * We now have to initially ...
         *  ... set "interruption subclass"
         *  ... enable "concurrent sense"
         *  ... enable "multipath mode" if more than one
         *        CHPID is available. This is done regardless
         *        whether multiple paths are available for us.
         */
        sch->schib.pmcw.isc = 3;        /* could be smth. else */
        sch->schib.pmcw.csense = 1;     /* concurrent sense */
        sch->schib.pmcw.ena = 0;
        if ((sch->lpm & (sch->lpm - 1)) != 0)
                sch->schib.pmcw.mp = 1; /* multipath mode */
        return 0;
}

/*
 * do_IRQ() handles all normal I/O device IRQ's (the special
 *          SMP cross-CPU interrupts have their own specific
 *          handlers).
 *
 */
void
do_IRQ(struct pt_regs *regs)
{
        struct tpi_info *tpi_info;
        struct subchannel *sch;
        struct irb *irb;

        irq_enter();
        asm volatile ("mc 0,0");
        if (S390_lowcore.int_clock >= S390_lowcore.jiffy_timer)
                /*
                 * Make sure that the i/o interrupt did not "overtake"
                 * the last HZ timer interrupt.
                 */
                account_ticks(regs);
        /*
         * Get interrupt information from lowcore
         */
        tpi_info = (struct tpi_info *) __LC_SUBCHANNEL_ID;
        irb = (struct irb *) __LC_IRB;
        do {
                kstat_cpu(smp_processor_id()).irqs[IO_INTERRUPT]++;
                /*
                 * Non I/O-subchannel thin interrupts are processed differently
                 */
                if (tpi_info->adapter_IO == 1 &&
                    tpi_info->int_type == IO_INTERRUPT_TYPE) {
                        do_adapter_IO();
                        continue;
                }
                sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
                if (sch)
                        spin_lock(&sch->lock);
                /* Store interrupt response block to lowcore. */
                if (tsch(tpi_info->schid, irb) == 0 && sch) {
                        /* Keep subchannel information word up to date. */
                        memcpy(&sch->schib.scsw, &irb->scsw,
                               sizeof(irb->scsw));
                        /* Call interrupt handler if there is one. */
                        if (sch->driver && sch->driver->irq)
                                sch->driver->irq(&sch->dev);
                }
                if (sch)
                        spin_unlock(&sch->lock);
                /*
                 * Are more interrupts pending?
                 * If so, the tpi instruction will update the lowcore
                 * to hold the info for the next interrupt.
                 * We don't do this for VM because a tpi drops the cpu
                 * out of the sie which costs more cycles than it saves.
                 */
        } while (!MACHINE_IS_VM && tpi(NULL) != 0);
        irq_exit();
}

#ifdef CONFIG_CCW_CONSOLE
static struct subchannel console_subchannel;
static int console_subchannel_in_use;

/*
 * busy wait for the next interrupt on the console
 */
void
wait_cons_dev(void)
{
        unsigned long cr6      __attribute__ ((aligned (8)));
        unsigned long save_cr6 __attribute__ ((aligned (8)));

        /*
         * before entering the spinlock we may already have
         * processed the interrupt on a different CPU...
         */
        if (!console_subchannel_in_use)
                return;

        /* disable all but isc 7 (console device) */
        __ctl_store(save_cr6, 6, 6);
        cr6 = 0x01000000;
        __ctl_load(cr6, 6, 6);

        do {
                spin_unlock(&console_subchannel.lock);
                if (!cio_tpi())
                        cpu_relax();
                spin_lock(&console_subchannel.lock);
        } while (console_subchannel.schib.scsw.actl != 0);
        /*
         * restore previous isc value
         */
        __ctl_load(save_cr6, 6, 6);
}
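
/*
 * for_each_subchannel() callback: check whether the given subchannel is
 * the console device (matching console_devno) and, if so, remember its
 * subchannel number in console_irq.
 */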
static int
cio_test_for_console(struct subchannel_id schid, void *data)
{
        if (stsch_err(schid, &console_subchannel.schib) != 0)
                return -ENXIO;
        if (console_subchannel.schib.pmcw.dnv &&
            console_subchannel.schib.pmcw.dev == console_devno) {
                console_irq = schid.sch_no;
                return 1; /* found */
        }
        return 0;
}
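
/*
 * Determine the subchannel number of the console device, either from the
 * irq number provided by VM or by searching all subchannels for the known
 * console device number. Returns -1 if no console subchannel was found.
 */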
static int
cio_get_console_sch_no(void)
{
        struct subchannel_id schid;

        init_subchannel_id(&schid);
        if (console_irq != -1) {
                /* VM provided us with the irq number of the console. */
                schid.sch_no = console_irq;
                if (stsch(schid, &console_subchannel.schib) != 0 ||
                    !console_subchannel.schib.pmcw.dnv)
                        return -1;
                console_devno = console_subchannel.schib.pmcw.dev;
        } else if (console_devno != -1) {
                /* At least the console device number is known. */
                for_each_subchannel(cio_test_for_console, NULL);
                if (console_irq == -1)
                        return -1;
        } else {
                /* unlike in 2.4, we cannot autoprobe here, since
                 * the channel subsystem is not fully initialized.
                 * With some luck, the HWC console can take over */
                printk(KERN_WARNING "No ccw console found!\n");
                return -1;
        }
        return console_irq;
}
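
/*
 * Locate and validate the console subchannel and enable it for I/O
 * interruption subclass 7. Returns a pointer to the static
 * console_subchannel or an ERR_PTR() on failure.
 */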
struct subchannel *
cio_probe_console(void)
{
        int sch_no, ret;
        struct subchannel_id schid;

        if (xchg(&console_subchannel_in_use, 1) != 0)
                return ERR_PTR(-EBUSY);
        sch_no = cio_get_console_sch_no();
        if (sch_no == -1) {
                console_subchannel_in_use = 0;
                return ERR_PTR(-ENODEV);
        }
        memset(&console_subchannel, 0, sizeof(struct subchannel));
        init_subchannel_id(&schid);
        schid.sch_no = sch_no;
        ret = cio_validate_subchannel(&console_subchannel, schid);
        if (ret) {
                console_subchannel_in_use = 0;
                return ERR_PTR(-ENODEV);
        }

        /*
         * enable console I/O-interrupt subclass 7
         */
        ctl_set_bit(6, 24);
        console_subchannel.schib.pmcw.isc = 7;
        console_subchannel.schib.pmcw.intparm =
                (__u32)(unsigned long)&console_subchannel;
        ret = cio_modify(&console_subchannel);
        if (ret) {
                console_subchannel_in_use = 0;
                return ERR_PTR(ret);
        }
        return &console_subchannel;
}
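
/*
 * Give up the console subchannel again: clear its interruption parameter
 * and disable I/O interruption subclass 7.
 */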
void
cio_release_console(void)
{
        console_subchannel.schib.pmcw.intparm = 0;
        cio_modify(&console_subchannel);
        ctl_clear_bit(6, 24);
        console_subchannel_in_use = 0;
}

/* Bah... hack to catch console special sausages. */
int
cio_is_console(struct subchannel_id schid)
{
        if (!console_subchannel_in_use)
                return 0;
        return schid_equal(&schid, &console_subchannel.schid);
}
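
/*
 * Return the console subchannel, or NULL if the console is not in use.
 */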
struct subchannel *
cio_get_console_subchannel(void)
{
        if (!console_subchannel_in_use)
                return NULL;
        return &console_subchannel;
}

#endif
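
/*
 * Try a few times to disable the subchannel by clearing its enable bit
 * via msch; used on the shutdown/re-IPL path only.
 */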
static inline int
__disable_subchannel_easy(struct subchannel_id schid, struct schib *schib)
{
        int retry, cc;

        cc = 0;
        for (retry = 0; retry < 3; retry++) {
                schib->pmcw.ena = 0;
                cc = msch(schid, schib);
                if (cc)
                        return (cc == 3 ? -ENODEV : -EBUSY);
                stsch(schid, schib);
                if (!schib->pmcw.ena)
                        return 0;
        }
        return -EBUSY; /* uhm... */
}
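
/*
 * Issue a clear subchannel and busy-wait (polling with tpi/tsch) for the
 * resulting interrupt on this subchannel; used on the shutdown/re-IPL path.
 */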
static inline int
__clear_subchannel_easy(struct subchannel_id schid)
{
        int retry;

        if (csch(schid))
                return -ENODEV;
        for (retry = 0; retry < 20; retry++) {
                struct tpi_info ti;

                if (tpi(&ti)) {
                        tsch(ti.schid, (struct irb *)__LC_IRB);
                        if (schid_equal(&ti.schid, &schid))
                                return 0;
                }
                udelay(100);
        }
        return -EBUSY;
}

extern void do_reipl(unsigned long devno);
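
/*
 * for_each_subchannel() callback: disable an enabled subchannel, falling
 * back to clear subchannel if disabling fails, so that no more I/O
 * interrupts can occur before a re-IPL.
 */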
static int
__shutdown_subchannel_easy(struct subchannel_id schid, void *data)
{
        struct schib schib;

        if (stsch_err(schid, &schib))
                return -ENXIO;
        if (!schib.pmcw.ena)
                return 0;
        switch (__disable_subchannel_easy(schid, &schib)) {
        case 0:
        case -ENODEV:
                break;
        default: /* -EBUSY */
                if (__clear_subchannel_easy(schid))
                        break; /* give up... */
                stsch(schid, &schib);
                __disable_subchannel_easy(schid, &schib);
        }
        return 0;
}
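
/*
 * Disable interrupts and shut down every subchannel.
 */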
void
clear_all_subchannels(void)
{
        local_irq_disable();
        for_each_subchannel(__shutdown_subchannel_easy, NULL);
}

/* Make sure all subchannels are quiet before we re-ipl an lpar. */
void
reipl(unsigned long devno)
{
        clear_all_subchannels();
        do_reipl(devno);
}