/*
 * Copyright IBM Corp. 2007, 2012
 *
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
 *	      Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
 */

#define KMSG_COMPONENT "sclp_cmd"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/completion.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/memory.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <asm/ctl_reg.h>
#include <asm/chpid.h>
#include <asm/setup.h>
#include <asm/page.h>
#include <asm/sclp.h>
#include <asm/numa.h>

#include "sclp.h"

static void sclp_sync_callback(struct sclp_req *req, void *data)
{
	struct completion *completion = data;

	complete(completion);
}
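
/*
 * Synchronously execute a single SCLP command: queue an sclp_req whose
 * callback completes a local completion and wait for it to finish. A
 * non-zero timeout limits how long the request may stay queued
 * (queue_timeout).
 */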
int sclp_sync_request(sclp_cmdw_t cmd, void *sccb)
{
	return sclp_sync_request_timeout(cmd, sccb, 0);
}

int sclp_sync_request_timeout(sclp_cmdw_t cmd, void *sccb, int timeout)
{
	struct completion completion;
	struct sclp_req *request;
	int rc;

	request = kzalloc(sizeof(*request), GFP_KERNEL);
	if (!request)
		return -ENOMEM;
	if (timeout)
		request->queue_timeout = timeout;
	request->command = cmd;
	request->sccb = sccb;
	request->status = SCLP_REQ_FILLED;
	request->callback = sclp_sync_callback;
	request->callback_data = &completion;
	init_completion(&completion);

	/* Perform sclp request. */
	rc = sclp_add_request(request);
	if (rc)
		goto out;
	wait_for_completion(&completion);

	/* Check response. */
	if (request->status != SCLP_REQ_DONE) {
		pr_warn("sync request failed (cmd=0x%08x, status=0x%02x)\n",
			cmd, request->status);
		rc = -EIO;
	}
out:
	kfree(request);
	return rc;
}

/*
 * CPU configuration related functions.
 */

#define SCLP_CMDW_CONFIGURE_CPU		0x00110001
#define SCLP_CMDW_DECONFIGURE_CPU	0x00100001
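
/*
 * Read the CPU information SCCB from the SCLP and copy the result into
 * *info via sclp_fill_core_info(). Returns 0 on success, -EOPNOTSUPP if
 * the CPU info facility is not available, -ENOMEM or -EIO on failure.
 */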
int _sclp_get_core_info(struct sclp_core_info *info)
{
	int rc;
	struct read_cpu_info_sccb *sccb;

	if (!SCLP_HAS_CPU_INFO)
		return -EOPNOTSUPP;
	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = sizeof(*sccb);
	rc = sclp_sync_request_timeout(SCLP_CMDW_READ_CPU_INFO, sccb,
				       SCLP_QUEUE_INTERVAL);
	if (rc)
		goto out;
	if (sccb->header.response_code != 0x0010) {
		pr_warn("readcpuinfo failed (response=0x%04x)\n",
			sccb->header.response_code);
		rc = -EIO;
		goto out;
	}
	sclp_fill_core_info(info, sccb);
out:
	free_page((unsigned long) sccb);
	return rc;
}

struct cpu_configure_sccb {
	struct sccb_header header;
} __attribute__((packed, aligned(8)));
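
/*
 * Issue a (de)configure CPU command with a minimal SCCB that consists only
 * of the header. Response codes 0x0020 and 0x0120 are treated as success;
 * anything else is reported and mapped to -EIO.
 */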
static int do_core_configure(sclp_cmdw_t cmd)
{
	struct cpu_configure_sccb *sccb;
	int rc;

	if (!SCLP_HAS_CPU_RECONFIG)
		return -EOPNOTSUPP;
	/*
	 * This is not going to cross a page boundary since we force
	 * kmalloc to have a minimum alignment of 8 bytes on s390.
	 */
	sccb = kzalloc(sizeof(*sccb), GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = sizeof(*sccb);
	rc = sclp_sync_request_timeout(cmd, sccb, SCLP_QUEUE_INTERVAL);
	if (rc)
		goto out;
	switch (sccb->header.response_code) {
	case 0x0020:
	case 0x0120:
		break;
	default:
		pr_warn("configure cpu failed (cmd=0x%08x, response=0x%04x)\n",
			cmd, sccb->header.response_code);
		rc = -EIO;
		break;
	}
out:
	kfree(sccb);
	return rc;
}

int sclp_core_configure(u8 core)
{
	return do_core_configure(SCLP_CMDW_CONFIGURE_CPU | core << 8);
}

int sclp_core_deconfigure(u8 core)
{
	return do_core_configure(SCLP_CMDW_DECONFIGURE_CPU | core << 8);
}

#ifdef CONFIG_MEMORY_HOTPLUG

static DEFINE_MUTEX(sclp_mem_mutex);
static LIST_HEAD(sclp_mem_list);
static u8 sclp_max_storage_id;
static DECLARE_BITMAP(sclp_storage_ids, 256);
static int sclp_mem_state_changed;

struct memory_increment {
	struct list_head list;
	u16 rn;
	int standby;
};

struct assign_storage_sccb {
	struct sccb_header header;
	u16 rn;
} __packed;

int arch_get_memory_phys_device(unsigned long start_pfn)
{
	if (!sclp.rzm)
		return 0;
	return PFN_PHYS(start_pfn) >> ilog2(sclp.rzm);
}
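
/*
 * Convert a storage increment number to its starting physical address:
 * increments are numbered from 1 and each covers sclp.rzm bytes.
 */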
static unsigned long long rn2addr(u16 rn)
{
	return (unsigned long long) (rn - 1) * sclp.rzm;
}

static int do_assign_storage(sclp_cmdw_t cmd, u16 rn)
{
	struct assign_storage_sccb *sccb;
	int rc;

	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = PAGE_SIZE;
	sccb->rn = rn;
	rc = sclp_sync_request_timeout(cmd, sccb, SCLP_QUEUE_INTERVAL);
	if (rc)
		goto out;
	switch (sccb->header.response_code) {
	case 0x0020:
	case 0x0120:
		break;
	default:
		pr_warn("assign storage failed (cmd=0x%08x, response=0x%04x, rn=0x%04x)\n",
			cmd, sccb->header.response_code, rn);
		rc = -EIO;
		break;
	}
out:
	free_page((unsigned long) sccb);
	return rc;
}

static int sclp_assign_storage(u16 rn)
{
	unsigned long long start;
	int rc;

	rc = do_assign_storage(0x000d0001, rn);
	if (rc)
		return rc;
	start = rn2addr(rn);
	storage_key_init_range(start, start + sclp.rzm);
	return 0;
}

static int sclp_unassign_storage(u16 rn)
{
	return do_assign_storage(0x000c0001, rn);
}

struct attach_storage_sccb {
	struct sccb_header header;
	u16 :16;
	u16 assigned;
	u32 :32;
	u32 entries[0];
} __packed;
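
/*
 * Attach storage element @id (SCLP command 0x00080001) and unassign every
 * increment the response reports as assigned, presumably so that those
 * increments stay in standby until they are explicitly set online.
 */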
static int sclp_attach_storage(u8 id)
{
	struct attach_storage_sccb *sccb;
	int rc;
	int i;

	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = PAGE_SIZE;
	rc = sclp_sync_request_timeout(0x00080001 | id << 8, sccb,
				       SCLP_QUEUE_INTERVAL);
	if (rc)
		goto out;
	switch (sccb->header.response_code) {
	case 0x0020:
		set_bit(id, sclp_storage_ids);
		for (i = 0; i < sccb->assigned; i++) {
			if (sccb->entries[i])
				sclp_unassign_storage(sccb->entries[i] >> 16);
		}
		break;
	default:
		rc = -EIO;
		break;
	}
out:
	free_page((unsigned long) sccb);
	return rc;
}
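
/*
 * Assign (online != 0) or unassign the storage increments that overlap the
 * address range [start, start + size) and update their standby state.
 */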
static int sclp_mem_change_state(unsigned long start, unsigned long size,
				 int online)
{
	struct memory_increment *incr;
	unsigned long long istart;
	int rc = 0;

	list_for_each_entry(incr, &sclp_mem_list, list) {
		istart = rn2addr(incr->rn);
		if (start + size - 1 < istart)
			break;
		if (start > istart + sclp.rzm - 1)
			continue;
		if (online)
			rc |= sclp_assign_storage(incr->rn);
		else
			sclp_unassign_storage(incr->rn);
		if (rc == 0)
			incr->standby = online ? 0 : 1;
	}
	return rc ? -EIO : 0;
}

static bool contains_standby_increment(unsigned long start, unsigned long end)
{
	struct memory_increment *incr;
	unsigned long istart;

	list_for_each_entry(incr, &sclp_mem_list, list) {
		istart = rn2addr(incr->rn);
		if (end - 1 < istart)
			continue;
		if (start > istart + sclp.rzm - 1)
			continue;
		if (incr->standby)
			return true;
	}
	return false;
}

static int sclp_mem_notifier(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	unsigned long start, size;
	struct memory_notify *arg;
	unsigned char id;
	int rc = 0;

	arg = data;
	start = arg->start_pfn << PAGE_SHIFT;
	size = arg->nr_pages << PAGE_SHIFT;
	mutex_lock(&sclp_mem_mutex);
	for_each_clear_bit(id, sclp_storage_ids, sclp_max_storage_id + 1)
		sclp_attach_storage(id);
	switch (action) {
	case MEM_GOING_OFFLINE:
		/*
		 * We do not allow memory blocks that contain standby memory
		 * to be set offline. This is done to simplify the
		 * "memory online" case.
		 */
		if (contains_standby_increment(start, start + size))
			rc = -EPERM;
		break;
	case MEM_ONLINE:
	case MEM_CANCEL_OFFLINE:
		break;
	case MEM_GOING_ONLINE:
		rc = sclp_mem_change_state(start, size, 1);
		break;
	case MEM_CANCEL_ONLINE:
		sclp_mem_change_state(start, size, 0);
		break;
	case MEM_OFFLINE:
		sclp_mem_change_state(start, size, 0);
		break;
	default:
		rc = -EINVAL;
		break;
	}
	if (!rc)
		sclp_mem_state_changed = 1;
	mutex_unlock(&sclp_mem_mutex);
	return rc ? NOTIFY_BAD : NOTIFY_OK;
}

static struct notifier_block sclp_mem_nb = {
	.notifier_call = sclp_mem_notifier,
};

static void __init align_to_block_size(unsigned long long *start,
				       unsigned long long *size,
				       unsigned long long alignment)
{
	unsigned long long start_align, size_align;

	start_align = roundup(*start, alignment);
	size_align = rounddown(*start + *size, alignment) - start_align;

	pr_info("Standby memory at 0x%llx (%lluM of %lluM usable)\n",
		*start, size_align >> 20, *size >> 20);
	*start = start_align;
	*size = size_align;
}
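
/*
 * Merge physically contiguous standby increments into one region and add it
 * to the kernel in memory-block-sized chunks. Called once per standby
 * increment plus a final call with rn == 0 to flush the last pending region.
 */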
static void __init add_memory_merged(u16 rn)
{
	unsigned long long start, size, addr, block_size;
	static u16 first_rn, num;

	if (rn && first_rn && (first_rn + num == rn)) {
		num++;
		return;
	}
	if (!first_rn)
		goto skip_add;
	start = rn2addr(first_rn);
	size = (unsigned long long) num * sclp.rzm;
	if (start >= VMEM_MAX_PHYS)
		goto skip_add;
	if (start + size > VMEM_MAX_PHYS)
		size = VMEM_MAX_PHYS - start;
	if (memory_end_set && (start >= memory_end))
		goto skip_add;
	if (memory_end_set && (start + size > memory_end))
		size = memory_end - start;
	block_size = memory_block_size_bytes();
	align_to_block_size(&start, &size, block_size);
	if (!size)
		goto skip_add;
	for (addr = start; addr < start + size; addr += block_size)
		add_memory(numa_pfn_to_nid(PFN_DOWN(addr)), addr, block_size);
skip_add:
	first_rn = rn;
	num = 1;
}

static void __init sclp_add_standby_memory(void)
{
	struct memory_increment *incr;

	list_for_each_entry(incr, &sclp_mem_list, list)
		if (incr->standby)
			add_memory_merged(incr->rn);
	add_memory_merged(0);
}

static void __init insert_increment(u16 rn, int standby, int assigned)
{
	struct memory_increment *incr, *new_incr;
	struct list_head *prev;
	u16 last_rn;

	new_incr = kzalloc(sizeof(*new_incr), GFP_KERNEL);
	if (!new_incr)
		return;
	new_incr->rn = rn;
	new_incr->standby = standby;
	last_rn = 0;
	prev = &sclp_mem_list;
	list_for_each_entry(incr, &sclp_mem_list, list) {
		if (assigned && incr->rn > rn)
			break;
		if (!assigned && incr->rn - last_rn > 1)
			break;
		last_rn = incr->rn;
		prev = &incr->list;
	}
	if (!assigned)
		new_incr->rn = last_rn + 1;
	if (new_incr->rn > sclp.rnmax) {
		kfree(new_incr);
		return;
	}
	list_add(&new_incr->list, prev);
}

static int sclp_mem_freeze(struct device *dev)
{
	if (!sclp_mem_state_changed)
		return 0;
	pr_err("Memory hotplug state changed, suspend refused.\n");
	return -EPERM;
}

struct read_storage_sccb {
	struct sccb_header header;
	u16 max_id;
	u16 assigned;
	u16 standby;
	u16 :16;
	u32 entries[0];
} __packed;

static const struct dev_pm_ops sclp_mem_pm_ops = {
	.freeze		= sclp_mem_freeze,
};

static struct platform_driver sclp_mem_pdrv = {
	.driver = {
		.name	= "sclp_mem",
		.pm	= &sclp_mem_pm_ops,
	},
};
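
/*
 * Probe all storage element IDs for assigned and standby storage increments,
 * build sclp_mem_list, register the memory hotplug notifier and the sclp_mem
 * platform driver, and finally add the detected standby memory.
 */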
static int __init sclp_detect_standby_memory(void)
{
	struct platform_device *sclp_pdev;
	struct read_storage_sccb *sccb;
	int i, id, assigned, rc;

	if (OLDMEM_BASE) /* No standby memory in kdump mode */
		return 0;
	if ((sclp.facilities & 0xe00000000000ULL) != 0xe00000000000ULL)
		return 0;
	rc = -ENOMEM;
	sccb = (void *) __get_free_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		goto out;
	assigned = 0;
	for (id = 0; id <= sclp_max_storage_id; id++) {
		memset(sccb, 0, PAGE_SIZE);
		sccb->header.length = PAGE_SIZE;
		rc = sclp_sync_request(0x00040001 | id << 8, sccb);
		if (rc)
			goto out;
		switch (sccb->header.response_code) {
		case 0x0010:
			set_bit(id, sclp_storage_ids);
			for (i = 0; i < sccb->assigned; i++) {
				if (!sccb->entries[i])
					continue;
				assigned++;
				insert_increment(sccb->entries[i] >> 16, 0, 1);
			}
			break;
		case 0x0310:
			break;
		case 0x0410:
			for (i = 0; i < sccb->assigned; i++) {
				if (!sccb->entries[i])
					continue;
				assigned++;
				insert_increment(sccb->entries[i] >> 16, 1, 1);
			}
			break;
		default:
			rc = -EIO;
			break;
		}
		if (!rc)
			sclp_max_storage_id = sccb->max_id;
	}
	if (rc || list_empty(&sclp_mem_list))
		goto out;
	for (i = 1; i <= sclp.rnmax - assigned; i++)
		insert_increment(0, 1, 0);
	rc = register_memory_notifier(&sclp_mem_nb);
	if (rc)
		goto out;
	rc = platform_driver_register(&sclp_mem_pdrv);
	if (rc)
		goto out;
	sclp_pdev = platform_device_register_simple("sclp_mem", -1, NULL, 0);
	rc = PTR_ERR_OR_ZERO(sclp_pdev);
	if (rc)
		goto out_driver;
	sclp_add_standby_memory();
	goto out;
out_driver:
	platform_driver_unregister(&sclp_mem_pdrv);
out:
	free_page((unsigned long) sccb);
	return rc;
}
__initcall(sclp_detect_standby_memory);

#endif /* CONFIG_MEMORY_HOTPLUG */

/*
 * Channel path configuration related functions.
 */

#define SCLP_CMDW_CONFIGURE_CHPATH		0x000f0001
#define SCLP_CMDW_DECONFIGURE_CHPATH		0x000e0001
#define SCLP_CMDW_READ_CHPATH_INFORMATION	0x00030001

struct chp_cfg_sccb {
	struct sccb_header header;
	u8 ccm;
	u8 reserved[6];
	u8 cssid;
} __attribute__((packed));
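
/*
 * Issue a configure or deconfigure channel-path command. Response codes
 * 0x0020, 0x0120, 0x0440 and 0x0450 are accepted as success; any other
 * response is reported and mapped to -EIO.
 */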
static int do_chp_configure(sclp_cmdw_t cmd)
{
	struct chp_cfg_sccb *sccb;
	int rc;

	if (!SCLP_HAS_CHP_RECONFIG)
		return -EOPNOTSUPP;
	/* Prepare sccb. */
	sccb = (struct chp_cfg_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = sizeof(*sccb);
	rc = sclp_sync_request(cmd, sccb);
	if (rc)
		goto out;
	switch (sccb->header.response_code) {
	case 0x0020:
	case 0x0120:
	case 0x0440:
	case 0x0450:
		break;
	default:
		pr_warn("configure channel-path failed (cmd=0x%08x, response=0x%04x)\n",
			cmd, sccb->header.response_code);
		rc = -EIO;
		break;
	}
out:
	free_page((unsigned long) sccb);
	return rc;
}

/**
 * sclp_chp_configure - perform configure channel-path sclp command
 * @chpid: channel-path ID
 *
 * Perform the configure channel-path SCLP command for the specified chpid
 * and wait for completion. Return 0 if the command finished successfully,
 * non-zero otherwise.
 */
int sclp_chp_configure(struct chp_id chpid)
{
	return do_chp_configure(SCLP_CMDW_CONFIGURE_CHPATH | chpid.id << 8);
}

/**
 * sclp_chp_deconfigure - perform deconfigure channel-path sclp command
 * @chpid: channel-path ID
 *
 * Perform the deconfigure channel-path SCLP command for the specified chpid
 * and wait for completion. On success return 0, non-zero otherwise.
 */
int sclp_chp_deconfigure(struct chp_id chpid)
{
	return do_chp_configure(SCLP_CMDW_DECONFIGURE_CHPATH | chpid.id << 8);
}

struct chp_info_sccb {
	struct sccb_header header;
	u8 recognized[SCLP_CHP_INFO_MASK_SIZE];
	u8 standby[SCLP_CHP_INFO_MASK_SIZE];
	u8 configured[SCLP_CHP_INFO_MASK_SIZE];
	u8 ccm;
	u8 reserved[6];
	u8 cssid;
} __attribute__((packed));

/**
 * sclp_chp_read_info - perform read channel-path information sclp command
 * @info: resulting channel-path information data
 *
 * Perform read channel-path information sclp command and wait for completion.
 * On success, store channel-path information in @info and return 0. Return
 * non-zero otherwise.
 */
int sclp_chp_read_info(struct sclp_chp_info *info)
{
	struct chp_info_sccb *sccb;
	int rc;

	if (!SCLP_HAS_CHP_INFO)
		return -EOPNOTSUPP;
	/* Prepare sccb. */
	sccb = (struct chp_info_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!sccb)
		return -ENOMEM;
	sccb->header.length = sizeof(*sccb);
	rc = sclp_sync_request(SCLP_CMDW_READ_CHPATH_INFORMATION, sccb);
	if (rc)
		goto out;
	if (sccb->header.response_code != 0x0010) {
		pr_warn("read channel-path info failed (response=0x%04x)\n",
			sccb->header.response_code);
		rc = -EIO;
		goto out;
	}
	memcpy(info->recognized, sccb->recognized, SCLP_CHP_INFO_MASK_SIZE);
	memcpy(info->standby, sccb->standby, SCLP_CHP_INFO_MASK_SIZE);
	memcpy(info->configured, sccb->configured, SCLP_CHP_INFO_MASK_SIZE);
out:
	free_page((unsigned long) sccb);
	return rc;
}