/*
 *   S/390 common I/O routines -- channel subsystem call
 *
 *    Copyright IBM Corp. 1999,2012
 *    Author(s): Ingo Adlung (adlung@de.ibm.com)
 *		 Cornelia Huck (cornelia.huck@de.ibm.com)
 *		 Arnd Bergmann (arndb@de.ibm.com)
 */

#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/pci.h>

#include <asm/cio.h>
#include <asm/chpid.h>
#include <asm/chsc.h>
#include <asm/crw.h>
#include <asm/isc.h>

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chp.h"
#include "chsc.h"

static void *sei_page;
static void *chsc_page;
static DEFINE_SPINLOCK(chsc_page_lock);

/**
 * chsc_error_from_response() - convert a chsc response to an error
 * @response: chsc response code
 *
 * Returns an appropriate Linux error code for @response.
 */
int chsc_error_from_response(int response)
{
	switch (response) {
	case 0x0001:
		return 0;
	case 0x0002:
	case 0x0003:
	case 0x0006:
	case 0x0007:
	case 0x0008:
	case 0x000a:
	case 0x0104:
		return -EINVAL;
	case 0x0004:
		return -EOPNOTSUPP;
	case 0x000b:
	case 0x0107:		/* "Channel busy" for the op 0x003d */
		return -EBUSY;
	case 0x0100:
	case 0x0102:
		return -ENOMEM;
	default:
		return -EIO;
	}
}
EXPORT_SYMBOL_GPL(chsc_error_from_response);

struct chsc_ssd_area {
	struct chsc_header request;
	u16 :10;
	u16 ssid:2;
	u16 :4;
	u16 f_sch;	  /* first subchannel */
	u16 :16;
	u16 l_sch;	  /* last subchannel */
	u32 :32;
	struct chsc_header response;
	u32 :32;
	u8 sch_valid:1;
	u8 dev_valid:1;
	u8 st:3;	  /* subchannel type */
	u8 zeroes:3;
	u8 unit_addr;	  /* unit address */
	u16 devno;	  /* device number */
	u8 path_mask;
	u8 fla_valid_mask;
	u16 sch;	  /* subchannel */
	u8 chpid[8];	  /* chpids 0-7 */
	u16 fla[8];	  /* full link addresses 0-7 */
} __attribute__((packed));

int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
{
	struct chsc_ssd_area *ssd_area;
	int ccode;
	int ret;
	int i;
	int mask;

	spin_lock_irq(&chsc_page_lock);
	memset(chsc_page, 0, PAGE_SIZE);
	ssd_area = chsc_page;
	ssd_area->request.length = 0x0010;
	ssd_area->request.code = 0x0004;
	ssd_area->ssid = schid.ssid;
	ssd_area->f_sch = schid.sch_no;
	ssd_area->l_sch = schid.sch_no;
	ccode = chsc(ssd_area);
	/* Check response. */
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}
	ret = chsc_error_from_response(ssd_area->response.code);
	if (ret != 0) {
		CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n",
			      schid.ssid, schid.sch_no,
			      ssd_area->response.code);
		goto out;
	}
	if (!ssd_area->sch_valid) {
		ret = -ENODEV;
		goto out;
	}
	/* Copy data */
	ret = 0;
	memset(ssd, 0, sizeof(struct chsc_ssd_info));
	if ((ssd_area->st != SUBCHANNEL_TYPE_IO) &&
	    (ssd_area->st != SUBCHANNEL_TYPE_MSG))
		goto out;
	ssd->path_mask = ssd_area->path_mask;
	ssd->fla_valid_mask = ssd_area->fla_valid_mask;
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (ssd_area->path_mask & mask) {
			chp_id_init(&ssd->chpid[i]);
			ssd->chpid[i].id = ssd_area->chpid[i];
		}
		if (ssd_area->fla_valid_mask & mask)
			ssd->fla[i] = ssd_area->fla[i];
	}
out:
	spin_unlock_irq(&chsc_page_lock);
	return ret;
}

/**
 * chsc_ssqd() - store subchannel QDIO data (SSQD)
 * @schid: id of the subchannel on which SSQD is performed
 * @ssqd: request and response block for SSQD
 *
 * Returns 0 on success.
 */
int chsc_ssqd(struct subchannel_id schid, struct chsc_ssqd_area *ssqd)
{
	memset(ssqd, 0, sizeof(*ssqd));
	ssqd->request.length = 0x0010;
	ssqd->request.code = 0x0024;
	ssqd->first_sch = schid.sch_no;
	ssqd->last_sch = schid.sch_no;
	ssqd->ssid = schid.ssid;

	if (chsc(ssqd))
		return -EIO;

	return chsc_error_from_response(ssqd->response.code);
}
EXPORT_SYMBOL_GPL(chsc_ssqd);

/**
 * chsc_sadc() - set adapter device controls (SADC)
 * @schid: id of the subchannel on which SADC is performed
 * @scssc: request and response block for SADC
 * @summary_indicator_addr: summary indicator address
 * @subchannel_indicator_addr: subchannel indicator address
 *
 * Returns 0 on success.
 */
int chsc_sadc(struct subchannel_id schid, struct chsc_scssc_area *scssc,
	      u64 summary_indicator_addr, u64 subchannel_indicator_addr)
{
	memset(scssc, 0, sizeof(*scssc));
	scssc->request.length = 0x0fe0;
	scssc->request.code = 0x0021;
	scssc->operation_code = 0;

	scssc->summary_indicator_addr = summary_indicator_addr;
	scssc->subchannel_indicator_addr = subchannel_indicator_addr;

	scssc->ks = PAGE_DEFAULT_KEY >> 4;
	scssc->kc = PAGE_DEFAULT_KEY >> 4;
	scssc->isc = QDIO_AIRQ_ISC;
	scssc->schid = schid;

	/* enable the time delay disablement facility */
	if (css_general_characteristics.aif_tdd)
		scssc->word_with_d_bit = 0x10000000;

	if (chsc(scssc))
		return -EIO;

	return chsc_error_from_response(scssc->response.code);
}
EXPORT_SYMBOL_GPL(chsc_sadc);

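/*
 * Notify the subchannel driver that one of its channel paths went away
 * (CHP_OFFLINE). If the driver rejects the event, clear the last path mask
 * and schedule the subchannel for evaluation.
 */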
static int s390_subchannel_remove_chpid(struct subchannel *sch, void *data)
{
	spin_lock_irq(sch->lock);
	if (sch->driver && sch->driver->chp_event)
		if (sch->driver->chp_event(sch, data, CHP_OFFLINE) != 0)
			goto out_unreg;
	spin_unlock_irq(sch->lock);
	return 0;

out_unreg:
	sch->lpm = 0;
	spin_unlock_irq(sch->lock);
	css_schedule_eval(sch->schid);
	return 0;
}

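/* Propagate the loss of channel path @chpid to all affected subchannels. */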
void chsc_chp_offline(struct chp_id chpid)
{
	char dbf_txt[15];
	struct chp_link link;

	sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id);
	CIO_TRACE_EVENT(2, dbf_txt);

	if (chp_get_status(chpid) <= 0)
		return;
	memset(&link, 0, sizeof(struct chp_link));
	link.chpid = chpid;
	/* Wait until previous actions have settled. */
	css_wait_for_slow_path();
	for_each_subchannel_staged(s390_subchannel_remove_chpid, NULL, &link);
}

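/*
 * Tell the subchannel driver that I/O resources reachable through @sch
 * may have become accessible again (CHP_ONLINE).
 */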
static int __s390_process_res_acc(struct subchannel *sch, void *data)
{
	spin_lock_irq(sch->lock);
	if (sch->driver && sch->driver->chp_event)
		sch->driver->chp_event(sch, data, CHP_ONLINE);
	spin_unlock_irq(sch->lock);

	return 0;
}

static void s390_process_res_acc(struct chp_link *link)
{
	char dbf_txt[15];

	sprintf(dbf_txt, "accpr%x.%02x", link->chpid.cssid,
		link->chpid.id);
	CIO_TRACE_EVENT(2, dbf_txt);
	if (link->fla != 0) {
		sprintf(dbf_txt, "fla%x", link->fla);
		CIO_TRACE_EVENT(2, dbf_txt);
	}
	/* Wait until previous actions have settled. */
	css_wait_for_slow_path();
	/*
	 * I/O resources may have become accessible.
	 * Scan through all subchannels that may be concerned and
	 * do a validation on those.
	 * The more information we have (info), the less scanning
	 * we will have to do.
	 */
	for_each_subchannel_staged(__s390_process_res_acc, NULL, link);
	css_schedule_reprobe();
}

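/*
 * Extract the CHPID from a link-incident record. Returns the CHPID on
 * success or -EINVAL if the record contains no usable node descriptor.
 */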
static int
__get_chpid_from_lir(void *data)
{
	struct lir {
		u8  iq;
		u8  ic;
		u16 sci;
		/* incident-node descriptor */
		u32 indesc[28];
		/* attached-node descriptor */
		u32 andesc[28];
		/* incident-specific information */
		u32 isinfo[28];
	} __attribute__ ((packed)) *lir;

	lir = data;
	if (!(lir->iq & 0x80))
		/* NULL link incident record */
		return -EINVAL;
	if (!(lir->indesc[0] & 0xc0000000))
		/* node descriptor not valid */
		return -EINVAL;
	if (!(lir->indesc[0] & 0x10000000))
		/* don't handle device-type nodes - FIXME */
		return -EINVAL;
	/* Byte 3 contains the chpid. Could also be CTCA, but we don't care */

	return (u16) (lir->indesc[0] & 0x000000ff);
}

struct chsc_sei_nt0_area {
	u8  flags;
	u8  vf;				/* validity flags */
	u8  rs;				/* reporting source */
	u8  cc;				/* content code */
	u16 fla;			/* full link address */
	u16 rsid;			/* reporting source id */
	u32 reserved1;
	u32 reserved2;
	/* ccdf has to be big enough for a link-incident record */
	u8  ccdf[PAGE_SIZE - 24 - 16];	/* content-code dependent field */
} __packed;

struct chsc_sei_nt2_area {
	u8  flags;			/* p and v bit */
	u8  reserved1;
	u8  reserved2;
	u8  cc;				/* content code */
	u32 reserved3[13];
	u8  ccdf[PAGE_SIZE - 24 - 56];	/* content-code dependent field */
} __packed;

#define CHSC_SEI_NT0	(1ULL << 63)
#define CHSC_SEI_NT2	(1ULL << 61)

struct chsc_sei {
	struct chsc_header request;
	u32 reserved1;
	u64 ntsm;			/* notification type mask */
	struct chsc_header response;
	u32 :24;
	u8 nt;
	union {
		struct chsc_sei_nt0_area nt0_area;
		struct chsc_sei_nt2_area nt2_area;
		u8 nt_area[PAGE_SIZE - 24];
	} u;
} __packed;

static void chsc_process_sei_link_incident(struct chsc_sei_nt0_area *sei_area)
{
	struct chp_id chpid;
	int id;

	CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n",
		      sei_area->rs, sei_area->rsid);
	if (sei_area->rs != 4)
		return;
	id = __get_chpid_from_lir(sei_area->ccdf);
	if (id < 0)
		CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n");
	else {
		chp_id_init(&chpid);
		chpid.id = id;
		chsc_chp_offline(chpid);
	}
}

static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area)
{
	struct chp_link link;
	struct chp_id chpid;
	int status;

	CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, "
		      "rs_id=%04x)\n", sei_area->rs, sei_area->rsid);
	if (sei_area->rs != 4)
		return;
	chp_id_init(&chpid);
	chpid.id = sei_area->rsid;
	/* allocate a new channel path structure, if needed */
	status = chp_get_status(chpid);
	if (status < 0)
		chp_new(chpid);
	else if (!status)
		return;
	memset(&link, 0, sizeof(struct chp_link));
	link.chpid = chpid;
	if ((sei_area->vf & 0xc0) != 0) {
		link.fla = sei_area->fla;
		if ((sei_area->vf & 0xc0) == 0xc0)
			/* full link address */
			link.fla_mask = 0xffff;
		else
			/* link address */
			link.fla_mask = 0xff00;
	}
	s390_process_res_acc(&link);
}

static void chsc_process_sei_chp_avail(struct chsc_sei_nt0_area *sei_area)
{
	struct channel_path *chp;
	struct chp_id chpid;
	u8 *data;
	int num;

	CIO_CRW_EVENT(4, "chsc: channel path availability information\n");
	if (sei_area->rs != 0)
		return;
	data = sei_area->ccdf;
	chp_id_init(&chpid);
	for (num = 0; num <= __MAX_CHPID; num++) {
		if (!chp_test_bit(data, num))
			continue;
		chpid.id = num;

		CIO_CRW_EVENT(4, "Update information for channel path "
			      "%x.%02x\n", chpid.cssid, chpid.id);
		chp = chpid_to_chp(chpid);
		if (!chp) {
			chp_new(chpid);
			continue;
		}
		mutex_lock(&chp->lock);
		chp_update_desc(chp);
		mutex_unlock(&chp->lock);
	}
}

struct chp_config_data {
	u8 map[32];
	u8 op;
	u8 pc;
};

static void chsc_process_sei_chp_config(struct chsc_sei_nt0_area *sei_area)
{
	struct chp_config_data *data;
	struct chp_id chpid;
	int num;
	char *events[3] = {"configure", "deconfigure", "cancel deconfigure"};

	CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n");
	if (sei_area->rs != 0)
		return;
	data = (struct chp_config_data *) &(sei_area->ccdf);
	chp_id_init(&chpid);
	for (num = 0; num <= __MAX_CHPID; num++) {
		if (!chp_test_bit(data->map, num))
			continue;
		chpid.id = num;
		pr_notice("Processing %s for channel path %x.%02x\n",
			  events[data->op], chpid.cssid, chpid.id);
		switch (data->op) {
		case 0:
			chp_cfg_schedule(chpid, 1);
			break;
		case 1:
			chp_cfg_schedule(chpid, 0);
			break;
		case 2:
			chp_cfg_cancel_deconfigure(chpid);
			break;
		}
	}
}

static void chsc_process_sei_scm_change(struct chsc_sei_nt0_area *sei_area)
{
	int ret;

	CIO_CRW_EVENT(4, "chsc: scm change notification\n");
	if (sei_area->rs != 7)
		return;

	ret = scm_update_information();
	if (ret)
		CIO_CRW_EVENT(0, "chsc: updating change notification"
			      " failed (rc=%d).\n", ret);
}

static void chsc_process_sei_scm_avail(struct chsc_sei_nt0_area *sei_area)
{
	int ret;

	CIO_CRW_EVENT(4, "chsc: scm available information\n");
	if (sei_area->rs != 7)
		return;

	ret = scm_process_availability_information();
	if (ret)
		CIO_CRW_EVENT(0, "chsc: process availability information"
			      " failed (rc=%d).\n", ret);
}

static void chsc_process_sei_nt2(struct chsc_sei_nt2_area *sei_area)
{
	switch (sei_area->cc) {
	case 1:
		zpci_event_error(sei_area->ccdf);
		break;
	case 2:
		zpci_event_availability(sei_area->ccdf);
		break;
	default:
		CIO_CRW_EVENT(2, "chsc: sei nt2 unhandled cc=%d\n",
			      sei_area->cc);
		break;
	}
}

static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area)
{
	/* which kind of information was stored? */
	switch (sei_area->cc) {
	case 1: /* link incident */
		chsc_process_sei_link_incident(sei_area);
		break;
	case 2: /* i/o resource accessibility */
		chsc_process_sei_res_acc(sei_area);
		break;
	case 7: /* channel-path-availability information */
		chsc_process_sei_chp_avail(sei_area);
		break;
	case 8: /* channel-path-configuration notification */
		chsc_process_sei_chp_config(sei_area);
		break;
	case 12: /* scm change notification */
		chsc_process_sei_scm_change(sei_area);
		break;
	case 14: /* scm available notification */
		chsc_process_sei_scm_avail(sei_area);
		break;
	default: /* other stuff */
		CIO_CRW_EVENT(2, "chsc: sei nt0 unhandled cc=%d\n",
			      sei_area->cc);
		break;
	}

	/* Check if we might have lost some information. */
	if (sei_area->flags & 0x40) {
		CIO_CRW_EVENT(2, "chsc: event overflow\n");
		css_schedule_eval_all();
	}
}

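/*
 * Repeatedly issue store event information (CHSC command 0x000e) and
 * dispatch each returned event to the NT0/NT2 handlers until no further
 * event information is pending. Falls back to omitting the notification
 * type mask if the machine rejects it.
 */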
static void chsc_process_event_information(struct chsc_sei *sei, u64 ntsm)
{
	static int ntsm_unsupported;

	while (true) {
		memset(sei, 0, sizeof(*sei));
		sei->request.length = 0x0010;
		sei->request.code = 0x000e;
		if (!ntsm_unsupported)
			sei->ntsm = ntsm;

		if (chsc(sei))
			break;

		if (sei->response.code != 0x0001) {
			CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x, ntsm=%llx)\n",
				      sei->response.code, sei->ntsm);

			if (sei->response.code == 3 && sei->ntsm) {
				/* Fallback for old firmware. */
				ntsm_unsupported = 1;
				continue;
			}
			break;
		}

		CIO_CRW_EVENT(2, "chsc: sei successful (nt=%d)\n", sei->nt);
		switch (sei->nt) {
		case 0:
			chsc_process_sei_nt0(&sei->u.nt0_area);
			break;
		case 2:
			chsc_process_sei_nt2(&sei->u.nt2_area);
			break;
		default:
			CIO_CRW_EVENT(2, "chsc: unhandled nt: %d\n", sei->nt);
			break;
		}

		if (!(sei->u.nt0_area.flags & 0x80))
			break;
	}
}

/*
 * Handle channel subsystem related CRWs.
 * Use store event information to find out what's going on.
 *
 * Note: Access to sei_page is serialized through machine check handler
 * thread, so no need for locking.
 */
static void chsc_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
	struct chsc_sei *sei = sei_page;

	if (overflow) {
		css_schedule_eval_all();
		return;
	}
	CIO_CRW_EVENT(2, "CRW reports slct=%d, oflw=%d, "
		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
		      crw0->erc, crw0->rsid);

	CIO_TRACE_EVENT(2, "prcss");
	chsc_process_event_information(sei, CHSC_SEI_NT0 | CHSC_SEI_NT2);
}

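/*
 * Channel path @chpid became available - notify the affected subchannels
 * and schedule a reprobe.
 */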
void chsc_chp_online(struct chp_id chpid)
{
	char dbf_txt[15];
	struct chp_link link;

	sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
	CIO_TRACE_EVENT(2, dbf_txt);

	if (chp_get_status(chpid) != 0) {
		memset(&link, 0, sizeof(struct chp_link));
		link.chpid = chpid;
		/* Wait until previous actions have settled. */
		css_wait_for_slow_path();
		for_each_subchannel_staged(__s390_process_res_acc, NULL,
					   &link);
		css_schedule_reprobe();
	}
}

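/* Forward a vary on/off event for @chpid to the driver of @sch. */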
static void __s390_subchannel_vary_chpid(struct subchannel *sch,
					 struct chp_id chpid, int on)
{
	unsigned long flags;
	struct chp_link link;

	memset(&link, 0, sizeof(struct chp_link));
	link.chpid = chpid;
	spin_lock_irqsave(sch->lock, flags);
	if (sch->driver && sch->driver->chp_event)
		sch->driver->chp_event(sch, &link,
				       on ? CHP_VARY_ON : CHP_VARY_OFF);
	spin_unlock_irqrestore(sch->lock, flags);
}

static int s390_subchannel_vary_chpid_off(struct subchannel *sch, void *data)
{
	struct chp_id *chpid = data;

	__s390_subchannel_vary_chpid(sch, *chpid, 0);
	return 0;
}

static int s390_subchannel_vary_chpid_on(struct subchannel *sch, void *data)
{
	struct chp_id *chpid = data;

	__s390_subchannel_vary_chpid(sch, *chpid, 1);
	return 0;
}

/**
 * chsc_chp_vary - propagate channel-path vary operation to subchannels
 * @chpid: channel-path ID
 * @on: non-zero for vary online, zero for vary offline
 */
int chsc_chp_vary(struct chp_id chpid, int on)
{
	struct channel_path *chp = chpid_to_chp(chpid);

	/* Wait until previous actions have settled. */
	css_wait_for_slow_path();
	/*
	 * Redo PathVerification on the devices the chpid connects to
	 */
	if (on) {
		/* Try to update the channel path description. */
		chp_update_desc(chp);
		for_each_subchannel_staged(s390_subchannel_vary_chpid_on,
					   NULL, &chpid);
		css_schedule_reprobe();
	} else
		for_each_subchannel_staged(s390_subchannel_vary_chpid_off,
					   NULL, &chpid);

	return 0;
}

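/*
 * Helpers to add or remove the channel-measurement attributes for all
 * known channel paths of a channel subsystem. chsc_add_cmg_attr() rolls
 * back already-added attributes on failure.
 */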
static void
chsc_remove_cmg_attr(struct channel_subsystem *css)
{
	int i;

	for (i = 0; i <= __MAX_CHPID; i++) {
		if (!css->chps[i])
			continue;
		chp_remove_cmg_attr(css->chps[i]);
	}
}

static int
chsc_add_cmg_attr(struct channel_subsystem *css)
{
	int i, ret;

	ret = 0;
	for (i = 0; i <= __MAX_CHPID; i++) {
		if (!css->chps[i])
			continue;
		ret = chp_add_cmg_attr(css->chps[i]);
		if (ret)
			goto cleanup;
	}
	return ret;
cleanup:
	for (--i; i >= 0; i--) {
		if (!css->chps[i])
			continue;
		chp_remove_cmg_attr(css->chps[i]);
	}
	return ret;
}

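/*
 * Issue the set-channel-monitor (SECM) command (request code 0x0016) to
 * enable or disable channel measurement for @css.
 */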
int __chsc_do_secm(struct channel_subsystem *css, int enable)
{
	struct {
		struct chsc_header request;
		u32 operation_code : 2;
		u32 : 30;
		u32 key : 4;
		u32 : 28;
		u32 zeroes1;
		u32 cub_addr1;
		u32 zeroes2;
		u32 cub_addr2;
		u32 reserved[13];
		struct chsc_header response;
		u32 status : 8;
		u32 : 4;
		u32 fmt : 4;
		u32 : 16;
	} __attribute__ ((packed)) *secm_area;
	int ret, ccode;

	spin_lock_irq(&chsc_page_lock);
	memset(chsc_page, 0, PAGE_SIZE);
	secm_area = chsc_page;
	secm_area->request.length = 0x0050;
	secm_area->request.code = 0x0016;

	secm_area->key = PAGE_DEFAULT_KEY >> 4;
	secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1;
	secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2;

	secm_area->operation_code = enable ? 0 : 1;

	ccode = chsc(secm_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	switch (secm_area->response.code) {
	case 0x0102:
	case 0x0103:
		ret = -EINVAL;
		break;
	default:
		ret = chsc_error_from_response(secm_area->response.code);
	}
	if (ret != 0)
		CIO_CRW_EVENT(2, "chsc: secm failed (rc=%04x)\n",
			      secm_area->response.code);
out:
	spin_unlock_irq(&chsc_page_lock);
	return ret;
}

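/*
 * Enable or disable channel measurement: allocate the channel-utilization
 * blocks, perform SECM and add or remove the measurement attributes.
 */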
int
chsc_secm(struct channel_subsystem *css, int enable)
{
	int ret;

	if (enable && !css->cm_enabled) {
		css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
		if (!css->cub_addr1 || !css->cub_addr2) {
			free_page((unsigned long)css->cub_addr1);
			free_page((unsigned long)css->cub_addr2);
			return -ENOMEM;
		}
	}
	ret = __chsc_do_secm(css, enable);
	if (!ret) {
		css->cm_enabled = enable;
		if (css->cm_enabled) {
			ret = chsc_add_cmg_attr(css);
			if (ret) {
				__chsc_do_secm(css, 0);
				css->cm_enabled = 0;
			}
		} else
			chsc_remove_cmg_attr(css);
	}
	if (!css->cm_enabled) {
		free_page((unsigned long)css->cub_addr1);
		free_page((unsigned long)css->cub_addr2);
	}
	return ret;
}

int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt,
				     int c, int m, void *page)
{
	struct chsc_scpd *scpd_area;
	int ccode, ret;

	if ((rfmt == 1) && !css_general_characteristics.fcs)
		return -EINVAL;
	if ((rfmt == 2) && !css_general_characteristics.cib)
		return -EINVAL;

	memset(page, 0, PAGE_SIZE);
	scpd_area = page;
	scpd_area->request.length = 0x0010;
	scpd_area->request.code = 0x0002;
	scpd_area->cssid = chpid.cssid;
	scpd_area->first_chpid = chpid.id;
	scpd_area->last_chpid = chpid.id;
	scpd_area->m = m;
	scpd_area->c = c;
	scpd_area->fmt = fmt;
	scpd_area->rfmt = rfmt;

	ccode = chsc(scpd_area);
	if (ccode > 0)
		return (ccode == 3) ? -ENODEV : -EBUSY;

	ret = chsc_error_from_response(scpd_area->response.code);
	if (ret)
		CIO_CRW_EVENT(2, "chsc: scpd failed (rc=%04x)\n",
			      scpd_area->response.code);
	return ret;
}
EXPORT_SYMBOL_GPL(chsc_determine_channel_path_desc);

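/* Store the base (format 0) description of channel path @chpid in @desc. */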
int chsc_determine_base_channel_path_desc(struct chp_id chpid,
					  struct channel_path_desc *desc)
{
	struct chsc_response_struct *chsc_resp;
	struct chsc_scpd *scpd_area;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&chsc_page_lock, flags);
	scpd_area = chsc_page;
	ret = chsc_determine_channel_path_desc(chpid, 0, 0, 0, 0, scpd_area);
	if (ret)
		goto out;
	chsc_resp = (void *)&scpd_area->response;
	memcpy(desc, &chsc_resp->data, sizeof(*desc));
out:
	spin_unlock_irqrestore(&chsc_page_lock, flags);
	return ret;
}

int chsc_determine_fmt1_channel_path_desc(struct chp_id chpid,
					  struct channel_path_desc_fmt1 *desc)
{
	struct chsc_response_struct *chsc_resp;
	struct chsc_scpd *scpd_area;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&chsc_page_lock, flags);
	scpd_area = chsc_page;
	ret = chsc_determine_channel_path_desc(chpid, 0, 0, 1, 0, scpd_area);
	if (ret)
		goto out;
	chsc_resp = (void *)&scpd_area->response;
	memcpy(desc, &chsc_resp->data, sizeof(*desc));
out:
	spin_unlock_irqrestore(&chsc_page_lock, flags);
	return ret;
}

static void
chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
			  struct cmg_chars *chars)
{
	struct cmg_chars *cmg_chars;
	int i, mask;

	cmg_chars = chp->cmg_chars;
	for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
		mask = 0x80 >> (i + 3);
		if (cmcv & mask)
			cmg_chars->values[i] = chars->values[i];
		else
			cmg_chars->values[i] = 0;
	}
}

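/*
 * Issue the store-channel-measurement-characteristics command (request
 * code 0x0022) for @chp and cache the returned cmg, shared state and
 * measurement characteristics in the channel-path structure.
 */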
int chsc_get_channel_measurement_chars(struct channel_path *chp)
{
	struct cmg_chars *cmg_chars;
	int ccode, ret;
	struct {
		struct chsc_header request;
		u32 : 24;
		u32 first_chpid : 8;
		u32 : 24;
		u32 last_chpid : 8;
		u32 zeroes1;
		struct chsc_header response;
		u32 zeroes2;
		u32 not_valid : 1;
		u32 shared : 1;
		u32 : 22;
		u32 chpid : 8;
		u32 cmcv : 5;
		u32 : 11;
		u32 cmgq : 8;
		u32 cmg : 8;
		u32 zeroes3;
		u32 data[NR_MEASUREMENT_CHARS];
	} __attribute__ ((packed)) *scmc_area;

	chp->cmg_chars = NULL;
	cmg_chars = kmalloc(sizeof(*cmg_chars), GFP_KERNEL);
	if (!cmg_chars)
		return -ENOMEM;

	spin_lock_irq(&chsc_page_lock);
	memset(chsc_page, 0, PAGE_SIZE);
	scmc_area = chsc_page;
	scmc_area->request.length = 0x0010;
	scmc_area->request.code = 0x0022;
	scmc_area->first_chpid = chp->chpid.id;
	scmc_area->last_chpid = chp->chpid.id;

	ccode = chsc(scmc_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	ret = chsc_error_from_response(scmc_area->response.code);
	if (ret) {
		CIO_CRW_EVENT(2, "chsc: scmc failed (rc=%04x)\n",
			      scmc_area->response.code);
		goto out;
	}
	if (scmc_area->not_valid) {
		chp->cmg = -1;
		chp->shared = -1;
		goto out;
	}
	chp->cmg = scmc_area->cmg;
	chp->shared = scmc_area->shared;
	if (chp->cmg != 2 && chp->cmg != 3) {
		/* No cmg-dependent data. */
		goto out;
	}
	chp->cmg_chars = cmg_chars;
	chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
				  (struct cmg_chars *) &scmc_area->data);
out:
	spin_unlock_irq(&chsc_page_lock);
	if (!chp->cmg_chars)
		kfree(cmg_chars);

	return ret;
}

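/* Allocate the static CHSC pages and register the CRW handler for the css. */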
int __init chsc_init(void)
{
	int ret;

	sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	chsc_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sei_page || !chsc_page) {
		ret = -ENOMEM;
		goto out_err;
	}
	ret = crw_register_handler(CRW_RSC_CSS, chsc_process_crw);
	if (ret)
		goto out_err;
	return ret;
out_err:
	free_page((unsigned long)chsc_page);
	free_page((unsigned long)sei_page);
	return ret;
}

void __init chsc_init_cleanup(void)
{
	crw_unregister_handler(CRW_RSC_CSS);
	free_page((unsigned long)chsc_page);
	free_page((unsigned long)sei_page);
}

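/* Enable the facility identified by @operation_code (CHSC request code 0x0031). */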
int chsc_enable_facility(int operation_code)
{
	unsigned long flags;
	int ret;
	struct {
		struct chsc_header request;
		u8 reserved1:4;
		u8 format:4;
		u8 reserved2;
		u16 operation_code;
		u32 reserved3;
		u32 reserved4;
		u32 operation_data_area[252];
		struct chsc_header response;
		u32 reserved5:4;
		u32 format2:4;
		u32 reserved6:24;
	} __attribute__ ((packed)) *sda_area;

	spin_lock_irqsave(&chsc_page_lock, flags);
	memset(chsc_page, 0, PAGE_SIZE);
	sda_area = chsc_page;
	sda_area->request.length = 0x0400;
	sda_area->request.code = 0x0031;
	sda_area->operation_code = operation_code;

	ret = chsc(sda_area);
	if (ret > 0) {
		ret = (ret == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	switch (sda_area->response.code) {
	case 0x0101:
		ret = -EOPNOTSUPP;
		break;
	default:
		ret = chsc_error_from_response(sda_area->response.code);
	}
	if (ret != 0)
		CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n",
			      operation_code, sda_area->response.code);
out:
	spin_unlock_irqrestore(&chsc_page_lock, flags);
	return ret;
}

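/*
 * Store the general and CHSC characteristics of the channel subsystem
 * (request code 0x0010) in the global css_*_characteristics structures.
 */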
struct css_general_char css_general_characteristics;
struct css_chsc_char css_chsc_characteristics;

int __init
chsc_determine_css_characteristics(void)
{
	int result;
	struct {
		struct chsc_header request;
		u32 reserved1;
		u32 reserved2;
		u32 reserved3;
		struct chsc_header response;
		u32 reserved4;
		u32 general_char[510];
		u32 chsc_char[508];
	} __attribute__ ((packed)) *scsc_area;

	spin_lock_irq(&chsc_page_lock);
	memset(chsc_page, 0, PAGE_SIZE);
	scsc_area = chsc_page;
	scsc_area->request.length = 0x0010;
	scsc_area->request.code = 0x0010;

	result = chsc(scsc_area);
	if (result) {
		result = (result == 3) ? -ENODEV : -EBUSY;
		goto exit;
	}

	result = chsc_error_from_response(scsc_area->response.code);
	if (result == 0) {
		memcpy(&css_general_characteristics, scsc_area->general_char,
		       sizeof(css_general_characteristics));
		memcpy(&css_chsc_characteristics, scsc_area->chsc_char,
		       sizeof(css_chsc_characteristics));
	} else
		CIO_CRW_EVENT(2, "chsc: scsc failed (rc=%04x)\n",
			      scsc_area->response.code);
exit:
	spin_unlock_irq(&chsc_page_lock);
	return result;
}

EXPORT_SYMBOL_GPL(css_general_characteristics);
EXPORT_SYMBOL_GPL(css_chsc_characteristics);

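/*
 * chsc_sstpc() issues CHSC command 0x0033 with the given op and ctrl
 * values; chsc_sstpi() issues command 0x0038 and copies @size bytes of
 * response data back to @result.
 */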
int chsc_sstpc(void *page, unsigned int op, u16 ctrl)
{
	struct {
		struct chsc_header request;
		unsigned int rsvd0;
		unsigned int op : 8;
		unsigned int rsvd1 : 8;
		unsigned int ctrl : 16;
		unsigned int rsvd2[5];
		struct chsc_header response;
		unsigned int rsvd3[7];
	} __attribute__ ((packed)) *rr;
	int rc;

	memset(page, 0, PAGE_SIZE);
	rr = page;
	rr->request.length = 0x0020;
	rr->request.code = 0x0033;
	rr->op = op;
	rr->ctrl = ctrl;
	rc = chsc(rr);
	if (rc)
		return -EIO;
	rc = (rr->response.code == 0x0001) ? 0 : -EIO;
	return rc;
}

int chsc_sstpi(void *page, void *result, size_t size)
{
	struct {
		struct chsc_header request;
		unsigned int rsvd0[3];
		struct chsc_header response;
		char data[size];
	} __attribute__ ((packed)) *rr;
	int rc;

	memset(page, 0, PAGE_SIZE);
	rr = page;
	rr->request.length = 0x0010;
	rr->request.code = 0x0038;
	rc = chsc(rr);
	if (rc)
		return -EIO;
	memcpy(result, &rr->data, size);
	return (rr->response.code == 0x0001) ? 0 : -EIO;
}

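/* Issue CHSC command 0x0046 for subchannel @schid (used to trigger logging). */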
int chsc_siosl(struct subchannel_id schid)
{
	struct {
		struct chsc_header request;
		u32 word1;
		struct subchannel_id sid;
		u32 word3;
		struct chsc_header response;
		u32 word[11];
	} __attribute__ ((packed)) *siosl_area;
	unsigned long flags;
	int ccode;
	int rc;

	spin_lock_irqsave(&chsc_page_lock, flags);
	memset(chsc_page, 0, PAGE_SIZE);
	siosl_area = chsc_page;
	siosl_area->request.length = 0x0010;
	siosl_area->request.code = 0x0046;
	siosl_area->word1 = 0x80000000;
	siosl_area->sid = schid;

	ccode = chsc(siosl_area);
	if (ccode > 0) {
		if (ccode == 3)
			rc = -ENODEV;
		else
			rc = -EBUSY;
		CIO_MSG_EVENT(2, "chsc: chsc failed for 0.%x.%04x (ccode=%d)\n",
			      schid.ssid, schid.sch_no, ccode);
		goto out;
	}
	rc = chsc_error_from_response(siosl_area->response.code);
	if (rc)
		CIO_MSG_EVENT(2, "chsc: siosl failed for 0.%x.%04x (rc=%04x)\n",
			      schid.ssid, schid.sch_no,
			      siosl_area->response.code);
	else
		CIO_MSG_EVENT(4, "chsc: siosl succeeded for 0.%x.%04x\n",
			      schid.ssid, schid.sch_no);
out:
	spin_unlock_irqrestore(&chsc_page_lock, flags);
	return rc;
}
EXPORT_SYMBOL_GPL(chsc_siosl);

/**
 * chsc_scm_info() - store SCM information (SSI)
 * @scm_area: request and response block for SSI
 * @token: continuation token
 *
 * Returns 0 on success.
 */
int chsc_scm_info(struct chsc_scm_info *scm_area, u64 token)
{
	int ccode, ret;

	memset(scm_area, 0, sizeof(*scm_area));
	scm_area->request.length = 0x0020;
	scm_area->request.code = 0x004C;
	scm_area->reqtok = token;
	ccode = chsc(scm_area);
	if (ccode > 0) {
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}
	ret = chsc_error_from_response(scm_area->response.code);
	if (ret != 0)
		CIO_MSG_EVENT(2, "chsc: scm info failed (rc=%04x)\n",
			      scm_area->response.code);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(chsc_scm_info);

/**
 * chsc_pnso_brinfo() - Perform Network-Subchannel Operation, Bridge Info.
 * @schid: id of the subchannel on which PNSO is performed
 * @brinfo_area: request and response block for the operation
 * @resume_token: resume token for multiblock response
 * @cnc: Boolean change-notification control
 *
 * brinfo_area must be allocated by the caller with get_zeroed_page(GFP_KERNEL)
 *
 * Returns 0 on success.
 */
int chsc_pnso_brinfo(struct subchannel_id schid,
		     struct chsc_pnso_area *brinfo_area,
		     struct chsc_brinfo_resume_token resume_token,
		     int cnc)
{
	memset(brinfo_area, 0, sizeof(*brinfo_area));
	brinfo_area->request.length = 0x0030;
	brinfo_area->request.code = 0x003d; /* network-subchannel operation */
	brinfo_area->m	   = schid.m;
	brinfo_area->ssid  = schid.ssid;
	brinfo_area->sch   = schid.sch_no;
	brinfo_area->cssid = schid.cssid;
	brinfo_area->oc    = 0; /* Store-network-bridging-information list */
	brinfo_area->resume_token = resume_token;
	brinfo_area->n	   = (cnc != 0);
	if (chsc(brinfo_area))
		return -EIO;
	return chsc_error_from_response(brinfo_area->response.code);
}
EXPORT_SYMBOL_GPL(chsc_pnso_brinfo);