/*
 * File...........: linux/drivers/s390/block/dasd.c
 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
 *                  Horst Hummel <Horst.Hummel@de.ibm.com>
 *                  Carsten Otte <Cotte@de.ibm.com>
 *                  Martin Schwidefsky <schwidefsky@de.ibm.com>
 * Bugreports.to..: <Linux390@de.ibm.com>
 * Copyright IBM Corp. 1999, 2009
 */
2009-03-26 17:23:49 +03:00
# define KMSG_COMPONENT "dasd"
# define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
2005-04-17 02:20:36 +04:00
# include <linux/kmod.h>
# include <linux/init.h>
# include <linux/interrupt.h>
# include <linux/ctype.h>
# include <linux/major.h>
# include <linux/slab.h>
2006-01-08 12:02:50 +03:00
# include <linux/hdreg.h>
2009-04-14 17:36:23 +04:00
# include <linux/async.h>
2010-02-27 00:37:46 +03:00
# include <linux/mutex.h>
2011-07-24 12:48:32 +04:00
# include <linux/debugfs.h>
# include <linux/seq_file.h>
2011-08-03 18:44:20 +04:00
# include <linux/vmalloc.h>
2005-04-17 02:20:36 +04:00
# include <asm/ccwdev.h>
# include <asm/ebcdic.h>
# include <asm/idals.h>
2009-03-26 17:23:48 +03:00
# include <asm/itcw.h>
2010-03-08 14:26:24 +03:00
# include <asm/diag.h>
2005-04-17 02:20:36 +04:00
/* This is ugly... */
# define PRINTK_HEADER "dasd:"
# include "dasd_int.h"
/*
* SECTION : Constant definitions to be used within this file
*/
# define DASD_CHANQ_MAX_SIZE 4
2010-05-12 11:32:11 +04:00
# define DASD_SLEEPON_START_TAG (void *) 1
# define DASD_SLEEPON_END_TAG (void *) 2
2005-04-17 02:20:36 +04:00
/*
* SECTION : exported variables of dasd . c
*/
debug_info_t * dasd_debug_area ;
2011-07-24 12:48:32 +04:00
static struct dentry * dasd_debugfs_root_entry ;
2005-04-17 02:20:36 +04:00
struct dasd_discipline * dasd_diag_discipline_pointer ;
2007-02-05 23:16:47 +03:00
void dasd_int_handler ( struct ccw_device * , unsigned long , struct irb * ) ;
2005-04-17 02:20:36 +04:00
MODULE_AUTHOR ( " Holger Smolinski <Holger.Smolinski@de.ibm.com> " ) ;
MODULE_DESCRIPTION ( " Linux on S/390 DASD device driver, "
" Copyright 2000 IBM Corporation " ) ;
MODULE_SUPPORTED_DEVICE ( " dasd " ) ;
MODULE_LICENSE ( " GPL " ) ;
/*
* SECTION : prototypes for static functions of dasd . c
*/
2008-01-26 16:11:23 +03:00
static int dasd_alloc_queue ( struct dasd_block * ) ;
static void dasd_setup_queue ( struct dasd_block * ) ;
static void dasd_free_queue ( struct dasd_block * ) ;
static void dasd_flush_request_queue ( struct dasd_block * ) ;
static int dasd_flush_block_queue ( struct dasd_block * ) ;
static void dasd_device_tasklet ( struct dasd_device * ) ;
static void dasd_block_tasklet ( struct dasd_block * ) ;
2006-12-06 22:18:20 +03:00
static void do_kick_device ( struct work_struct * ) ;
2009-06-16 12:30:25 +04:00
static void do_restore_device ( struct work_struct * ) ;
2010-05-17 12:00:10 +04:00
static void do_reload_device ( struct work_struct * ) ;
2008-01-26 16:11:23 +03:00
static void dasd_return_cqr_cb ( struct dasd_ccw_req * , void * ) ;
2009-02-11 12:37:31 +03:00
static void dasd_device_timeout ( unsigned long ) ;
static void dasd_block_timeout ( unsigned long ) ;
2009-12-07 14:51:51 +03:00
static void __dasd_process_erp ( struct dasd_device * , struct dasd_ccw_req * ) ;
2011-07-24 12:48:32 +04:00
static void dasd_profile_init ( struct dasd_profile * , struct dentry * ) ;
static void dasd_profile_exit ( struct dasd_profile * ) ;
2005-04-17 02:20:36 +04:00
/*
* SECTION : Operations on the device structure .
*/
static wait_queue_head_t dasd_init_waitq ;
2006-08-30 16:33:33 +04:00
static wait_queue_head_t dasd_flush_wq ;
2008-05-30 12:03:31 +04:00
static wait_queue_head_t generic_waitq ;
2005-04-17 02:20:36 +04:00
/*
* Allocate memory for a new device structure .
*/
2008-01-26 16:11:23 +03:00
struct dasd_device * dasd_alloc_device ( void )
2005-04-17 02:20:36 +04:00
{
struct dasd_device * device ;
2008-01-26 16:11:23 +03:00
device = kzalloc ( sizeof ( struct dasd_device ) , GFP_ATOMIC ) ;
if ( ! device )
2005-04-17 02:20:36 +04:00
return ERR_PTR ( - ENOMEM ) ;
/* Get two pages for normal block device operations. */
device - > ccw_mem = ( void * ) __get_free_pages ( GFP_ATOMIC | GFP_DMA , 1 ) ;
2008-01-26 16:11:23 +03:00
if ( ! device - > ccw_mem ) {
2005-04-17 02:20:36 +04:00
kfree ( device ) ;
return ERR_PTR ( - ENOMEM ) ;
}
/* Get one page for error recovery. */
device - > erp_mem = ( void * ) get_zeroed_page ( GFP_ATOMIC | GFP_DMA ) ;
2008-01-26 16:11:23 +03:00
if ( ! device - > erp_mem ) {
2005-04-17 02:20:36 +04:00
free_pages ( ( unsigned long ) device - > ccw_mem , 1 ) ;
kfree ( device ) ;
return ERR_PTR ( - ENOMEM ) ;
}
dasd_init_chunklist ( & device - > ccw_chunks , device - > ccw_mem , PAGE_SIZE * 2 ) ;
dasd_init_chunklist ( & device - > erp_chunks , device - > erp_mem , PAGE_SIZE ) ;
spin_lock_init ( & device - > mem_lock ) ;
2008-01-26 16:11:23 +03:00
atomic_set ( & device - > tasklet_scheduled , 0 ) ;
2006-06-29 16:58:12 +04:00
tasklet_init ( & device - > tasklet ,
2008-01-26 16:11:23 +03:00
( void ( * ) ( unsigned long ) ) dasd_device_tasklet ,
2005-04-17 02:20:36 +04:00
( unsigned long ) device ) ;
INIT_LIST_HEAD ( & device - > ccw_queue ) ;
init_timer ( & device - > timer ) ;
2009-02-11 12:37:31 +03:00
device - > timer . function = dasd_device_timeout ;
device - > timer . data = ( unsigned long ) device ;
2006-12-06 22:18:20 +03:00
INIT_WORK ( & device - > kick_work , do_kick_device ) ;
2009-06-16 12:30:25 +04:00
INIT_WORK ( & device - > restore_device , do_restore_device ) ;
2010-05-17 12:00:10 +04:00
INIT_WORK ( & device - > reload_device , do_reload_device ) ;
2005-04-17 02:20:36 +04:00
device - > state = DASD_STATE_NEW ;
device - > target = DASD_STATE_NEW ;
2010-02-27 00:37:46 +03:00
mutex_init ( & device - > state_mutex ) ;
2011-07-24 12:48:32 +04:00
spin_lock_init ( & device - > profile . lock ) ;
2005-04-17 02:20:36 +04:00
return device ;
}
/*
* Free memory of a device structure .
*/
2008-01-26 16:11:23 +03:00
void dasd_free_device ( struct dasd_device * device )
2005-04-17 02:20:36 +04:00
{
2005-11-07 12:01:30 +03:00
kfree ( device - > private ) ;
2005-04-17 02:20:36 +04:00
free_page ( ( unsigned long ) device - > erp_mem ) ;
free_pages ( ( unsigned long ) device - > ccw_mem , 1 ) ;
kfree ( device ) ;
}
2008-01-26 16:11:23 +03:00
/*
* Allocate memory for a new device structure .
*/
struct dasd_block * dasd_alloc_block ( void )
{
struct dasd_block * block ;
block = kzalloc ( sizeof ( * block ) , GFP_ATOMIC ) ;
if ( ! block )
return ERR_PTR ( - ENOMEM ) ;
/* open_count = 0 means device online but not in use */
atomic_set ( & block - > open_count , - 1 ) ;
spin_lock_init ( & block - > request_queue_lock ) ;
atomic_set ( & block - > tasklet_scheduled , 0 ) ;
tasklet_init ( & block - > tasklet ,
( void ( * ) ( unsigned long ) ) dasd_block_tasklet ,
( unsigned long ) block ) ;
INIT_LIST_HEAD ( & block - > ccw_queue ) ;
spin_lock_init ( & block - > queue_lock ) ;
init_timer ( & block - > timer ) ;
2009-02-11 12:37:31 +03:00
block - > timer . function = dasd_block_timeout ;
block - > timer . data = ( unsigned long ) block ;
2011-07-24 12:48:32 +04:00
spin_lock_init ( & block - > profile . lock ) ;
2008-01-26 16:11:23 +03:00
return block ;
}
/*
 * Free a dasd_block structure.
 */
void dasd_free_block(struct dasd_block *block)
{
	kfree(block);
}
2005-04-17 02:20:36 +04:00
/*
* Make a new device known to the system .
*/
2008-01-26 16:11:23 +03:00
static int dasd_state_new_to_known ( struct dasd_device * device )
2005-04-17 02:20:36 +04:00
{
int rc ;
/*
2006-06-29 16:58:12 +04:00
* As long as the device is not in state DASD_STATE_NEW we want to
2005-04-17 02:20:36 +04:00
* keep the reference count > 0.
*/
dasd_get_device ( device ) ;
2008-01-26 16:11:23 +03:00
if ( device - > block ) {
rc = dasd_alloc_queue ( device - > block ) ;
if ( rc ) {
dasd_put_device ( device ) ;
return rc ;
}
2005-04-17 02:20:36 +04:00
}
device - > state = DASD_STATE_KNOWN ;
return 0 ;
}
/*
* Let the system forget about a device .
*/
2008-01-26 16:11:23 +03:00
static int dasd_state_known_to_new ( struct dasd_device * device )
2005-04-17 02:20:36 +04:00
{
2006-03-24 14:15:25 +03:00
/* Disable extended error reporting for this device. */
dasd_eer_disable ( device ) ;
2005-04-17 02:20:36 +04:00
/* Forget the discipline information. */
2008-01-26 16:11:23 +03:00
if ( device - > discipline ) {
if ( device - > discipline - > uncheck_device )
device - > discipline - > uncheck_device ( device ) ;
2006-02-21 05:28:13 +03:00
module_put ( device - > discipline - > owner ) ;
2008-01-26 16:11:23 +03:00
}
2005-04-17 02:20:36 +04:00
device - > discipline = NULL ;
2006-02-21 05:28:13 +03:00
if ( device - > base_discipline )
module_put ( device - > base_discipline - > owner ) ;
device - > base_discipline = NULL ;
2005-04-17 02:20:36 +04:00
device - > state = DASD_STATE_NEW ;
2008-01-26 16:11:23 +03:00
if ( device - > block )
dasd_free_queue ( device - > block ) ;
2005-04-17 02:20:36 +04:00
/* Give up reference we took in dasd_state_new_to_known. */
dasd_put_device ( device ) ;
2006-08-30 16:33:33 +04:00
return 0 ;
2005-04-17 02:20:36 +04:00
}
2011-07-24 12:48:32 +04:00
/*
 * Create a debugfs subdirectory below base_dentry.
 * Returns the new dentry, or NULL if the base is missing or
 * debugfs_create_dir() failed (debugfs is best-effort here).
 */
static struct dentry *dasd_debugfs_setup(const char *name,
					 struct dentry *base_dentry)
{
	struct dentry *pde;

	if (!base_dentry)
		return NULL;
	pde = debugfs_create_dir(name, base_dentry);
	if (!pde || IS_ERR(pde))
		return NULL;
	return pde;
}
2005-04-17 02:20:36 +04:00
/*
* Request the irq line for the device .
*/
2008-01-26 16:11:23 +03:00
static int dasd_state_known_to_basic ( struct dasd_device * device )
2005-04-17 02:20:36 +04:00
{
2011-07-24 12:48:32 +04:00
struct dasd_block * block = device - > block ;
2005-04-17 02:20:36 +04:00
int rc ;
/* Allocate and register gendisk structure. */
2011-07-24 12:48:32 +04:00
if ( block ) {
rc = dasd_gendisk_alloc ( block ) ;
2008-01-26 16:11:23 +03:00
if ( rc )
return rc ;
2011-07-24 12:48:32 +04:00
block - > debugfs_dentry =
dasd_debugfs_setup ( block - > gdp - > disk_name ,
dasd_debugfs_root_entry ) ;
dasd_profile_init ( & block - > profile , block - > debugfs_dentry ) ;
if ( dasd_global_profile_level = = DASD_PROFILE_ON )
dasd_profile_on ( & device - > block - > profile ) ;
}
device - > debugfs_dentry =
dasd_debugfs_setup ( dev_name ( & device - > cdev - > dev ) ,
dasd_debugfs_root_entry ) ;
dasd_profile_init ( & device - > profile , device - > debugfs_dentry ) ;
2005-04-17 02:20:36 +04:00
/* register 'device' debug area, used for all DBF_DEV_XXX calls */
2009-03-26 17:23:49 +03:00
device - > debug_area = debug_register ( dev_name ( & device - > cdev - > dev ) , 4 , 1 ,
2008-01-26 16:11:23 +03:00
8 * sizeof ( long ) ) ;
2005-04-17 02:20:36 +04:00
debug_register_view ( device - > debug_area , & debug_sprintf_view ) ;
2006-09-20 17:59:07 +04:00
debug_set_level ( device - > debug_area , DBF_WARNING ) ;
2005-04-17 02:20:36 +04:00
DBF_DEV_EVENT ( DBF_EMERG , device , " %s " , " debug area created " ) ;
device - > state = DASD_STATE_BASIC ;
return 0 ;
}
/*
* Release the irq line for the device . Terminate any running i / o .
*/
2008-01-26 16:11:23 +03:00
static int dasd_state_basic_to_known ( struct dasd_device * device )
2005-04-17 02:20:36 +04:00
{
2006-08-30 16:33:33 +04:00
int rc ;
2008-01-26 16:11:23 +03:00
if ( device - > block ) {
2011-07-24 12:48:32 +04:00
dasd_profile_exit ( & device - > block - > profile ) ;
if ( device - > block - > debugfs_dentry )
debugfs_remove ( device - > block - > debugfs_dentry ) ;
2008-01-26 16:11:23 +03:00
dasd_gendisk_free ( device - > block ) ;
dasd_block_clear_timer ( device - > block ) ;
}
rc = dasd_flush_device_queue ( device ) ;
2006-08-30 16:33:33 +04:00
if ( rc )
return rc ;
2008-01-26 16:11:23 +03:00
dasd_device_clear_timer ( device ) ;
2011-07-24 12:48:32 +04:00
dasd_profile_exit ( & device - > profile ) ;
if ( device - > debugfs_dentry )
debugfs_remove ( device - > debugfs_dentry ) ;
2006-08-30 16:33:33 +04:00
2005-04-17 02:20:36 +04:00
DBF_DEV_EVENT ( DBF_EMERG , device , " %p debug area deleted " , device ) ;
if ( device - > debug_area ! = NULL ) {
debug_unregister ( device - > debug_area ) ;
device - > debug_area = NULL ;
}
device - > state = DASD_STATE_KNOWN ;
2006-08-30 16:33:33 +04:00
return 0 ;
2005-04-17 02:20:36 +04:00
}
/*
* Do the initial analysis . The do_analysis function may return
* - EAGAIN in which case the device keeps the state DASD_STATE_BASIC
* until the discipline decides to continue the startup sequence
* by calling the function dasd_change_state . The eckd disciplines
* uses this to start a ccw that detects the format . The completion
* interrupt for this detection ccw uses the kernel event daemon to
* trigger the call to dasd_change_state . All this is done in the
* discipline code , see dasd_eckd . c .
2006-03-08 08:55:39 +03:00
* After the analysis ccw is done ( do_analysis returned 0 ) the block
* device is setup .
* In case the analysis returns an error , the device setup is stopped
* ( a fake disk was already added to allow formatting ) .
2005-04-17 02:20:36 +04:00
*/
2008-01-26 16:11:23 +03:00
static int dasd_state_basic_to_ready ( struct dasd_device * device )
2005-04-17 02:20:36 +04:00
{
int rc ;
2008-01-26 16:11:23 +03:00
struct dasd_block * block ;
2005-04-17 02:20:36 +04:00
rc = 0 ;
2008-01-26 16:11:23 +03:00
block = device - > block ;
2006-03-08 08:55:39 +03:00
/* make disk known with correct capacity */
2008-01-26 16:11:23 +03:00
if ( block ) {
if ( block - > base - > discipline - > do_analysis ! = NULL )
rc = block - > base - > discipline - > do_analysis ( block ) ;
if ( rc ) {
if ( rc ! = - EAGAIN )
device - > state = DASD_STATE_UNFMT ;
return rc ;
}
dasd_setup_queue ( block ) ;
set_capacity ( block - > gdp ,
block - > blocks < < block - > s2b_shift ) ;
device - > state = DASD_STATE_READY ;
rc = dasd_scan_partitions ( block ) ;
if ( rc )
device - > state = DASD_STATE_BASIC ;
} else {
device - > state = DASD_STATE_READY ;
}
2006-03-08 08:55:39 +03:00
return rc ;
2005-04-17 02:20:36 +04:00
}
/*
* Remove device from block device layer . Destroy dirty buffers .
* Forget format information . Check if the target level is basic
* and if it is create fake disk for formatting .
*/
2008-01-26 16:11:23 +03:00
static int dasd_state_ready_to_basic ( struct dasd_device * device )
2005-04-17 02:20:36 +04:00
{
2006-08-30 16:33:33 +04:00
int rc ;
2005-04-17 02:20:36 +04:00
device - > state = DASD_STATE_BASIC ;
2008-01-26 16:11:23 +03:00
if ( device - > block ) {
struct dasd_block * block = device - > block ;
rc = dasd_flush_block_queue ( block ) ;
if ( rc ) {
device - > state = DASD_STATE_READY ;
return rc ;
}
dasd_flush_request_queue ( block ) ;
2010-02-27 00:37:48 +03:00
dasd_destroy_partitions ( block ) ;
2008-01-26 16:11:23 +03:00
block - > blocks = 0 ;
block - > bp_block = 0 ;
block - > s2b_shift = 0 ;
}
2006-08-30 16:33:33 +04:00
return 0 ;
2005-04-17 02:20:36 +04:00
}
2006-03-08 08:55:39 +03:00
/*
* Back to basic .
*/
2008-01-26 16:11:23 +03:00
static int dasd_state_unfmt_to_basic ( struct dasd_device * device )
2006-03-08 08:55:39 +03:00
{
device - > state = DASD_STATE_BASIC ;
2006-08-30 16:33:33 +04:00
return 0 ;
2006-03-08 08:55:39 +03:00
}
2005-04-17 02:20:36 +04:00
/*
* Make the device online and schedule the bottom half to start
* the requeueing of requests from the linux request queue to the
* ccw queue .
*/
2006-08-30 16:33:33 +04:00
static int
2005-04-17 02:20:36 +04:00
dasd_state_ready_to_online ( struct dasd_device * device )
{
2008-01-26 16:11:23 +03:00
int rc ;
2009-01-09 14:14:50 +03:00
struct gendisk * disk ;
struct disk_part_iter piter ;
struct hd_struct * part ;
2008-01-26 16:11:23 +03:00
if ( device - > discipline - > ready_to_online ) {
rc = device - > discipline - > ready_to_online ( device ) ;
if ( rc )
return rc ;
}
2005-04-17 02:20:36 +04:00
device - > state = DASD_STATE_ONLINE ;
2009-01-09 14:14:50 +03:00
if ( device - > block ) {
2008-01-26 16:11:23 +03:00
dasd_schedule_block_bh ( device - > block ) ;
2011-01-05 14:48:06 +03:00
if ( ( device - > features & DASD_FEATURE_USERAW ) ) {
disk = device - > block - > gdp ;
kobject_uevent ( & disk_to_dev ( disk ) - > kobj , KOBJ_CHANGE ) ;
return 0 ;
}
2009-01-09 14:14:50 +03:00
disk = device - > block - > bdev - > bd_disk ;
disk_part_iter_init ( & piter , disk , DISK_PITER_INCL_PART0 ) ;
while ( ( part = disk_part_iter_next ( & piter ) ) )
kobject_uevent ( & part_to_dev ( part ) - > kobj , KOBJ_CHANGE ) ;
disk_part_iter_exit ( & piter ) ;
}
2005-04-17 02:20:36 +04:00
return 0 ;
}
/*
* Stop the requeueing of requests again .
*/
2008-01-26 16:11:23 +03:00
static int dasd_state_online_to_ready ( struct dasd_device * device )
2005-04-17 02:20:36 +04:00
{
2008-01-26 16:11:23 +03:00
int rc ;
2009-01-09 14:14:50 +03:00
struct gendisk * disk ;
struct disk_part_iter piter ;
struct hd_struct * part ;
2008-01-26 16:11:23 +03:00
if ( device - > discipline - > online_to_ready ) {
rc = device - > discipline - > online_to_ready ( device ) ;
if ( rc )
return rc ;
}
2005-04-17 02:20:36 +04:00
device - > state = DASD_STATE_READY ;
2011-01-05 14:48:06 +03:00
if ( device - > block & & ! ( device - > features & DASD_FEATURE_USERAW ) ) {
2009-01-09 14:14:50 +03:00
disk = device - > block - > bdev - > bd_disk ;
disk_part_iter_init ( & piter , disk , DISK_PITER_INCL_PART0 ) ;
while ( ( part = disk_part_iter_next ( & piter ) ) )
kobject_uevent ( & part_to_dev ( part ) - > kobj , KOBJ_CHANGE ) ;
disk_part_iter_exit ( & piter ) ;
}
2006-08-30 16:33:33 +04:00
return 0 ;
2005-04-17 02:20:36 +04:00
}
/*
* Device startup state changes .
*/
2008-01-26 16:11:23 +03:00
static int dasd_increase_state ( struct dasd_device * device )
2005-04-17 02:20:36 +04:00
{
int rc ;
rc = 0 ;
if ( device - > state = = DASD_STATE_NEW & &
device - > target > = DASD_STATE_KNOWN )
rc = dasd_state_new_to_known ( device ) ;
if ( ! rc & &
device - > state = = DASD_STATE_KNOWN & &
device - > target > = DASD_STATE_BASIC )
rc = dasd_state_known_to_basic ( device ) ;
if ( ! rc & &
device - > state = = DASD_STATE_BASIC & &
device - > target > = DASD_STATE_READY )
rc = dasd_state_basic_to_ready ( device ) ;
2006-04-28 05:40:10 +04:00
if ( ! rc & &
device - > state = = DASD_STATE_UNFMT & &
device - > target > DASD_STATE_UNFMT )
rc = - EPERM ;
2005-04-17 02:20:36 +04:00
if ( ! rc & &
device - > state = = DASD_STATE_READY & &
device - > target > = DASD_STATE_ONLINE )
rc = dasd_state_ready_to_online ( device ) ;
return rc ;
}
/*
* Device shutdown state changes .
*/
2008-01-26 16:11:23 +03:00
static int dasd_decrease_state ( struct dasd_device * device )
2005-04-17 02:20:36 +04:00
{
2006-08-30 16:33:33 +04:00
int rc ;
rc = 0 ;
2005-04-17 02:20:36 +04:00
if ( device - > state = = DASD_STATE_ONLINE & &
device - > target < = DASD_STATE_READY )
2006-08-30 16:33:33 +04:00
rc = dasd_state_online_to_ready ( device ) ;
2006-06-29 16:58:12 +04:00
2006-08-30 16:33:33 +04:00
if ( ! rc & &
device - > state = = DASD_STATE_READY & &
2005-04-17 02:20:36 +04:00
device - > target < = DASD_STATE_BASIC )
2006-08-30 16:33:33 +04:00
rc = dasd_state_ready_to_basic ( device ) ;
2006-03-08 08:55:39 +03:00
2006-08-30 16:33:33 +04:00
if ( ! rc & &
device - > state = = DASD_STATE_UNFMT & &
2006-03-08 08:55:39 +03:00
device - > target < = DASD_STATE_BASIC )
2006-08-30 16:33:33 +04:00
rc = dasd_state_unfmt_to_basic ( device ) ;
2006-03-08 08:55:39 +03:00
2006-08-30 16:33:33 +04:00
if ( ! rc & &
device - > state = = DASD_STATE_BASIC & &
2005-04-17 02:20:36 +04:00
device - > target < = DASD_STATE_KNOWN )
2006-08-30 16:33:33 +04:00
rc = dasd_state_basic_to_known ( device ) ;
2006-06-29 16:58:12 +04:00
2006-08-30 16:33:33 +04:00
if ( ! rc & &
device - > state = = DASD_STATE_KNOWN & &
2005-04-17 02:20:36 +04:00
device - > target < = DASD_STATE_NEW )
2006-08-30 16:33:33 +04:00
rc = dasd_state_known_to_new ( device ) ;
2005-04-17 02:20:36 +04:00
2006-08-30 16:33:33 +04:00
return rc ;
2005-04-17 02:20:36 +04:00
}
/*
* This is the main startup / shutdown routine .
*/
2008-01-26 16:11:23 +03:00
static void dasd_change_state ( struct dasd_device * device )
2005-04-17 02:20:36 +04:00
{
2009-06-22 14:08:21 +04:00
int rc ;
2005-04-17 02:20:36 +04:00
if ( device - > state = = device - > target )
/* Already where we want to go today... */
return ;
if ( device - > state < device - > target )
rc = dasd_increase_state ( device ) ;
else
rc = dasd_decrease_state ( device ) ;
2009-06-22 14:08:21 +04:00
if ( rc = = - EAGAIN )
return ;
if ( rc )
device - > target = device - > state ;
2005-04-17 02:20:36 +04:00
2010-02-27 00:37:46 +03:00
if ( device - > state = = device - > target )
2005-04-17 02:20:36 +04:00
wake_up ( & dasd_init_waitq ) ;
2007-04-27 18:01:47 +04:00
/* let user-space know that the device status changed */
kobject_uevent ( & device - > cdev - > dev . kobj , KOBJ_CHANGE ) ;
2005-04-17 02:20:36 +04:00
}
/*
* Kick starter for devices that did not complete the startup / shutdown
* procedure or were sleeping because of a pending state .
* dasd_kick_device will schedule a call do do_kick_device to the kernel
* event daemon .
*/
2008-01-26 16:11:23 +03:00
static void do_kick_device ( struct work_struct * work )
2005-04-17 02:20:36 +04:00
{
2006-12-06 22:18:20 +03:00
struct dasd_device * device = container_of ( work , struct dasd_device , kick_work ) ;
2010-02-27 00:37:46 +03:00
mutex_lock ( & device - > state_mutex ) ;
2005-04-17 02:20:36 +04:00
dasd_change_state ( device ) ;
2010-02-27 00:37:46 +03:00
mutex_unlock ( & device - > state_mutex ) ;
2008-01-26 16:11:23 +03:00
dasd_schedule_device_bh ( device ) ;
2005-04-17 02:20:36 +04:00
dasd_put_device ( device ) ;
}
2008-01-26 16:11:23 +03:00
void dasd_kick_device ( struct dasd_device * device )
2005-04-17 02:20:36 +04:00
{
dasd_get_device ( device ) ;
/* queue call to dasd_kick_device to the kernel event daemon. */
schedule_work ( & device - > kick_work ) ;
}
2010-05-17 12:00:10 +04:00
/*
* dasd_reload_device will schedule a call do do_reload_device to the kernel
* event daemon .
*/
static void do_reload_device ( struct work_struct * work )
{
struct dasd_device * device = container_of ( work , struct dasd_device ,
reload_device ) ;
device - > discipline - > reload ( device ) ;
dasd_put_device ( device ) ;
}
void dasd_reload_device ( struct dasd_device * device )
{
dasd_get_device ( device ) ;
/* queue call to dasd_reload_device to the kernel event daemon. */
schedule_work ( & device - > reload_device ) ;
}
EXPORT_SYMBOL ( dasd_reload_device ) ;
2009-06-16 12:30:25 +04:00
/*
* dasd_restore_device will schedule a call do do_restore_device to the kernel
* event daemon .
*/
static void do_restore_device ( struct work_struct * work )
{
struct dasd_device * device = container_of ( work , struct dasd_device ,
restore_device ) ;
device - > cdev - > drv - > restore ( device - > cdev ) ;
dasd_put_device ( device ) ;
}
void dasd_restore_device ( struct dasd_device * device )
{
dasd_get_device ( device ) ;
/* queue call to dasd_restore_device to the kernel event daemon. */
schedule_work ( & device - > restore_device ) ;
}
2005-04-17 02:20:36 +04:00
/*
* Set the target state for a device and starts the state change .
*/
2008-01-26 16:11:23 +03:00
void dasd_set_target_state ( struct dasd_device * device , int target )
2005-04-17 02:20:36 +04:00
{
2009-04-14 17:36:23 +04:00
dasd_get_device ( device ) ;
2010-02-27 00:37:46 +03:00
mutex_lock ( & device - > state_mutex ) ;
2005-04-17 02:20:36 +04:00
/* If we are in probeonly mode stop at DASD_STATE_READY. */
if ( dasd_probeonly & & target > DASD_STATE_READY )
target = DASD_STATE_READY ;
if ( device - > target ! = target ) {
2010-02-27 00:37:46 +03:00
if ( device - > state = = target )
2005-04-17 02:20:36 +04:00
wake_up ( & dasd_init_waitq ) ;
device - > target = target ;
}
if ( device - > state ! = device - > target )
dasd_change_state ( device ) ;
2010-02-27 00:37:46 +03:00
mutex_unlock ( & device - > state_mutex ) ;
dasd_put_device ( device ) ;
2005-04-17 02:20:36 +04:00
}
/*
* Enable devices with device numbers in [ from . . to ] .
*/
2008-01-26 16:11:23 +03:00
static inline int _wait_for_device ( struct dasd_device * device )
2005-04-17 02:20:36 +04:00
{
return ( device - > state = = device - > target ) ;
}
2008-01-26 16:11:23 +03:00
void dasd_enable_device ( struct dasd_device * device )
2005-04-17 02:20:36 +04:00
{
dasd_set_target_state ( device , DASD_STATE_ONLINE ) ;
if ( device - > state < = DASD_STATE_KNOWN )
/* No discipline for device found. */
dasd_set_target_state ( device , DASD_STATE_NEW ) ;
/* Now wait for the devices to come up. */
wait_event ( dasd_init_waitq , _wait_for_device ( device ) ) ;
}
/*
* SECTION : device operation ( interrupt handler , start i / o , term i / o . . . )
*/
2011-07-24 12:48:32 +04:00
unsigned int dasd_global_profile_level = DASD_PROFILE_OFF ;
2005-04-17 02:20:36 +04:00
2011-07-24 12:48:32 +04:00
# ifdef CONFIG_DASD_PROFILE
struct dasd_profile_info dasd_global_profile_data ;
static struct dentry * dasd_global_profile_dentry ;
static struct dentry * dasd_debugfs_global_entry ;
2005-04-17 02:20:36 +04:00
/*
* Add profiling information for cqr before execution .
*/
2008-01-26 16:11:23 +03:00
static void dasd_profile_start ( struct dasd_block * block ,
struct dasd_ccw_req * cqr ,
struct request * req )
2005-04-17 02:20:36 +04:00
{
struct list_head * l ;
unsigned int counter ;
2011-07-24 12:48:32 +04:00
struct dasd_device * device ;
2005-04-17 02:20:36 +04:00
/* count the length of the chanq for statistics */
counter = 0 ;
2011-07-24 12:48:32 +04:00
if ( dasd_global_profile_level | | block - > profile . data )
list_for_each ( l , & block - > ccw_queue )
if ( + + counter > = 31 )
break ;
if ( dasd_global_profile_level ) {
dasd_global_profile_data . dasd_io_nr_req [ counter ] + + ;
if ( rq_data_dir ( req ) = = READ )
dasd_global_profile_data . dasd_read_nr_req [ counter ] + + ;
}
spin_lock ( & block - > profile . lock ) ;
if ( block - > profile . data )
block - > profile . data - > dasd_io_nr_req [ counter ] + + ;
if ( rq_data_dir ( req ) = = READ )
block - > profile . data - > dasd_read_nr_req [ counter ] + + ;
spin_unlock ( & block - > profile . lock ) ;
/*
* We count the request for the start device , even though it may run on
* some other device due to error recovery . This way we make sure that
* we count each request only once .
*/
device = cqr - > startdev ;
if ( device - > profile . data ) {
counter = 1 ; /* request is not yet queued on the start device */
list_for_each ( l , & device - > ccw_queue )
if ( + + counter > = 31 )
break ;
}
spin_lock ( & device - > profile . lock ) ;
if ( device - > profile . data ) {
device - > profile . data - > dasd_io_nr_req [ counter ] + + ;
if ( rq_data_dir ( req ) = = READ )
device - > profile . data - > dasd_read_nr_req [ counter ] + + ;
}
spin_unlock ( & device - > profile . lock ) ;
2005-04-17 02:20:36 +04:00
}
/*
* Add profiling information for cqr after execution .
*/
2011-07-24 12:48:32 +04:00
# define dasd_profile_counter(value, index) \
{ \
for ( index = 0 ; index < 31 & & value > > ( 2 + index ) ; index + + ) \
; \
}
static void dasd_profile_end_add_data ( struct dasd_profile_info * data ,
int is_alias ,
int is_tpm ,
int is_read ,
long sectors ,
int sectors_ind ,
int tottime_ind ,
int tottimeps_ind ,
int strtime_ind ,
int irqtime_ind ,
int irqtimeps_ind ,
int endtime_ind )
{
/* in case of an overflow, reset the whole profile */
if ( data - > dasd_io_reqs = = UINT_MAX ) {
memset ( data , 0 , sizeof ( * data ) ) ;
getnstimeofday ( & data - > starttod ) ;
}
data - > dasd_io_reqs + + ;
data - > dasd_io_sects + = sectors ;
if ( is_alias )
data - > dasd_io_alias + + ;
if ( is_tpm )
data - > dasd_io_tpm + + ;
data - > dasd_io_secs [ sectors_ind ] + + ;
data - > dasd_io_times [ tottime_ind ] + + ;
data - > dasd_io_timps [ tottimeps_ind ] + + ;
data - > dasd_io_time1 [ strtime_ind ] + + ;
data - > dasd_io_time2 [ irqtime_ind ] + + ;
data - > dasd_io_time2ps [ irqtimeps_ind ] + + ;
data - > dasd_io_time3 [ endtime_ind ] + + ;
if ( is_read ) {
data - > dasd_read_reqs + + ;
data - > dasd_read_sects + = sectors ;
if ( is_alias )
data - > dasd_read_alias + + ;
if ( is_tpm )
data - > dasd_read_tpm + + ;
data - > dasd_read_secs [ sectors_ind ] + + ;
data - > dasd_read_times [ tottime_ind ] + + ;
data - > dasd_read_time1 [ strtime_ind ] + + ;
data - > dasd_read_time2 [ irqtime_ind ] + + ;
data - > dasd_read_time3 [ endtime_ind ] + + ;
}
}
2008-01-26 16:11:23 +03:00
static void dasd_profile_end ( struct dasd_block * block ,
struct dasd_ccw_req * cqr ,
struct request * req )
2005-04-17 02:20:36 +04:00
{
long strtime , irqtime , endtime , tottime ; /* in microseconds */
long tottimeps , sectors ;
2011-07-24 12:48:32 +04:00
struct dasd_device * device ;
int sectors_ind , tottime_ind , tottimeps_ind , strtime_ind ;
int irqtime_ind , irqtimeps_ind , endtime_ind ;
2005-04-17 02:20:36 +04:00
2011-07-24 12:48:32 +04:00
device = cqr - > startdev ;
if ( ! ( dasd_global_profile_level | |
block - > profile . data | |
device - > profile . data ) )
2005-04-17 02:20:36 +04:00
return ;
2009-05-07 17:24:39 +04:00
sectors = blk_rq_sectors ( req ) ;
2005-04-17 02:20:36 +04:00
if ( ! cqr - > buildclk | | ! cqr - > startclk | |
! cqr - > stopclk | | ! cqr - > endclk | |
! sectors )
return ;
strtime = ( ( cqr - > startclk - cqr - > buildclk ) > > 12 ) ;
irqtime = ( ( cqr - > stopclk - cqr - > startclk ) > > 12 ) ;
endtime = ( ( cqr - > endclk - cqr - > stopclk ) > > 12 ) ;
tottime = ( ( cqr - > endclk - cqr - > buildclk ) > > 12 ) ;
tottimeps = tottime / sectors ;
2011-07-24 12:48:32 +04:00
dasd_profile_counter ( sectors , sectors_ind ) ;
dasd_profile_counter ( tottime , tottime_ind ) ;
dasd_profile_counter ( tottimeps , tottimeps_ind ) ;
dasd_profile_counter ( strtime , strtime_ind ) ;
dasd_profile_counter ( irqtime , irqtime_ind ) ;
dasd_profile_counter ( irqtime / sectors , irqtimeps_ind ) ;
dasd_profile_counter ( endtime , endtime_ind ) ;
if ( dasd_global_profile_level ) {
dasd_profile_end_add_data ( & dasd_global_profile_data ,
cqr - > startdev ! = block - > base ,
cqr - > cpmode = = 1 ,
rq_data_dir ( req ) = = READ ,
sectors , sectors_ind , tottime_ind ,
tottimeps_ind , strtime_ind ,
irqtime_ind , irqtimeps_ind ,
endtime_ind ) ;
}
spin_lock ( & block - > profile . lock ) ;
if ( block - > profile . data )
dasd_profile_end_add_data ( block - > profile . data ,
cqr - > startdev ! = block - > base ,
cqr - > cpmode = = 1 ,
rq_data_dir ( req ) = = READ ,
sectors , sectors_ind , tottime_ind ,
tottimeps_ind , strtime_ind ,
irqtime_ind , irqtimeps_ind ,
endtime_ind ) ;
spin_unlock ( & block - > profile . lock ) ;
spin_lock ( & device - > profile . lock ) ;
if ( device - > profile . data )
dasd_profile_end_add_data ( device - > profile . data ,
cqr - > startdev ! = block - > base ,
cqr - > cpmode = = 1 ,
rq_data_dir ( req ) = = READ ,
sectors , sectors_ind , tottime_ind ,
tottimeps_ind , strtime_ind ,
irqtime_ind , irqtimeps_ind ,
endtime_ind ) ;
spin_unlock ( & device - > profile . lock ) ;
}
void dasd_profile_reset ( struct dasd_profile * profile )
{
struct dasd_profile_info * data ;
spin_lock_bh ( & profile - > lock ) ;
data = profile - > data ;
if ( ! data ) {
spin_unlock_bh ( & profile - > lock ) ;
return ;
}
memset ( data , 0 , sizeof ( * data ) ) ;
getnstimeofday ( & data - > starttod ) ;
spin_unlock_bh ( & profile - > lock ) ;
}
/*
 * Zero the global profile counters and restart its time-of-day stamp.
 */
void dasd_global_profile_reset(void)
{
	memset(&dasd_global_profile_data, 0, sizeof(dasd_global_profile_data));
	getnstimeofday(&dasd_global_profile_data.starttod);
}
int dasd_profile_on ( struct dasd_profile * profile )
{
struct dasd_profile_info * data ;
data = kzalloc ( sizeof ( * data ) , GFP_KERNEL ) ;
if ( ! data )
return - ENOMEM ;
spin_lock_bh ( & profile - > lock ) ;
if ( profile - > data ) {
spin_unlock_bh ( & profile - > lock ) ;
kfree ( data ) ;
return 0 ;
}
getnstimeofday ( & data - > starttod ) ;
profile - > data = data ;
spin_unlock_bh ( & profile - > lock ) ;
return 0 ;
}
void dasd_profile_off ( struct dasd_profile * profile )
{
spin_lock_bh ( & profile - > lock ) ;
kfree ( profile - > data ) ;
profile - > data = NULL ;
spin_unlock_bh ( & profile - > lock ) ;
}
/*
 * Copy a user-supplied buffer into a freshly vmalloc'ed, NUL-terminated
 * kernel string, stripping a single trailing newline if present.
 * Returns the buffer (caller must vfree()) or ERR_PTR(-ENOMEM/-EFAULT).
 *
 * Fix: guard user_len == 0 before touching buffer[user_len - 1]; the
 * previous code underflowed the index for zero-length writes.
 */
char *dasd_get_user_string(const char __user *user_buf, size_t user_len)
{
        char *buffer;

        buffer = vmalloc(user_len + 1);
        if (buffer == NULL)
                return ERR_PTR(-ENOMEM);
        if (copy_from_user(buffer, user_buf, user_len) != 0) {
                vfree(buffer);
                return ERR_PTR(-EFAULT);
        }
        /* got the string, now strip linefeed. */
        if (user_len && buffer[user_len - 1] == '\n')
                buffer[user_len - 1] = 0;
        else
                buffer[user_len] = 0;
        return buffer;
}
2011-07-24 12:48:32 +04:00
/*
 * debugfs write handler for a per-device/per-block statistics file.
 * Accepts the commands "reset", "on" and "off"; anything else -> -EINVAL.
 */
static ssize_t dasd_stats_write(struct file *file,
                                const char __user *user_buf,
                                size_t user_len, loff_t *pos)
{
        struct seq_file *m = file->private_data;
        struct dasd_profile *prof = m->private;
        char *buffer, *str;
        int rc;

        if (user_len > 65536)
                user_len = 65536;
        buffer = dasd_get_user_string(user_buf, user_len);
        if (IS_ERR(buffer))
                return PTR_ERR(buffer);

        str = skip_spaces(buffer);
        rc = user_len;
        if (strncmp(str, "reset", 5) == 0) {
                dasd_profile_reset(prof);
        } else if (strncmp(str, "on", 2) == 0) {
                rc = dasd_profile_on(prof);
                if (!rc)
                        rc = user_len;
        } else if (strncmp(str, "off", 3) == 0) {
                dasd_profile_off(prof);
        } else {
                rc = -EINVAL;
        }
        vfree(buffer);
        return rc;
}
/* Print the 32 histogram buckets of one counter array on a single line. */
static void dasd_stats_array(struct seq_file *m, unsigned int *array)
{
        int idx;

        for (idx = 0; idx < 32; idx++)
                seq_printf(m, "%u ", array[idx]);
        seq_putc(m, '\n');
}
static void dasd_stats_seq_print ( struct seq_file * m ,
struct dasd_profile_info * data )
{
seq_printf ( m , " start_time %ld.%09ld \n " ,
data - > starttod . tv_sec , data - > starttod . tv_nsec ) ;
seq_printf ( m , " total_requests %u \n " , data - > dasd_io_reqs ) ;
seq_printf ( m , " total_sectors %u \n " , data - > dasd_io_sects ) ;
seq_printf ( m , " total_pav %u \n " , data - > dasd_io_alias ) ;
seq_printf ( m , " total_hpf %u \n " , data - > dasd_io_tpm ) ;
seq_printf ( m , " histogram_sectors " ) ;
dasd_stats_array ( m , data - > dasd_io_secs ) ;
seq_printf ( m , " histogram_io_times " ) ;
dasd_stats_array ( m , data - > dasd_io_times ) ;
seq_printf ( m , " histogram_io_times_weighted " ) ;
dasd_stats_array ( m , data - > dasd_io_timps ) ;
seq_printf ( m , " histogram_time_build_to_ssch " ) ;
dasd_stats_array ( m , data - > dasd_io_time1 ) ;
seq_printf ( m , " histogram_time_ssch_to_irq " ) ;
dasd_stats_array ( m , data - > dasd_io_time2 ) ;
seq_printf ( m , " histogram_time_ssch_to_irq_weighted " ) ;
dasd_stats_array ( m , data - > dasd_io_time2ps ) ;
seq_printf ( m , " histogram_time_irq_to_end " ) ;
dasd_stats_array ( m , data - > dasd_io_time3 ) ;
seq_printf ( m , " histogram_ccw_queue_length " ) ;
dasd_stats_array ( m , data - > dasd_io_nr_req ) ;
seq_printf ( m , " total_read_requests %u \n " , data - > dasd_read_reqs ) ;
seq_printf ( m , " total_read_sectors %u \n " , data - > dasd_read_sects ) ;
seq_printf ( m , " total_read_pav %u \n " , data - > dasd_read_alias ) ;
seq_printf ( m , " total_read_hpf %u \n " , data - > dasd_read_tpm ) ;
seq_printf ( m , " histogram_read_sectors " ) ;
dasd_stats_array ( m , data - > dasd_read_secs ) ;
seq_printf ( m , " histogram_read_times " ) ;
dasd_stats_array ( m , data - > dasd_read_times ) ;
seq_printf ( m , " histogram_read_time_build_to_ssch " ) ;
dasd_stats_array ( m , data - > dasd_read_time1 ) ;
seq_printf ( m , " histogram_read_time_ssch_to_irq " ) ;
dasd_stats_array ( m , data - > dasd_read_time2 ) ;
seq_printf ( m , " histogram_read_time_irq_to_end " ) ;
dasd_stats_array ( m , data - > dasd_read_time3 ) ;
seq_printf ( m , " histogram_read_ccw_queue_length " ) ;
dasd_stats_array ( m , data - > dasd_read_nr_req ) ;
}
static int dasd_stats_show ( struct seq_file * m , void * v )
{
struct dasd_profile * profile ;
struct dasd_profile_info * data ;
profile = m - > private ;
spin_lock_bh ( & profile - > lock ) ;
data = profile - > data ;
if ( ! data ) {
spin_unlock_bh ( & profile - > lock ) ;
seq_printf ( m , " disabled \n " ) ;
return 0 ;
}
dasd_stats_seq_print ( m , data ) ;
spin_unlock_bh ( & profile - > lock ) ;
return 0 ;
}
static int dasd_stats_open ( struct inode * inode , struct file * file )
{
struct dasd_profile * profile = inode - > i_private ;
return single_open ( file , dasd_stats_show , profile ) ;
}
/* debugfs file operations for the per-device/per-block statistics file */
static const struct file_operations dasd_stats_raw_fops = {
        .owner          = THIS_MODULE,
        .open           = dasd_stats_open,
        .read           = seq_read,
        .write          = dasd_stats_write,
        .llseek         = seq_lseek,
        .release        = single_release,
};
/*
 * debugfs write handler for the global statistics file.
 * "reset" clears the counters, "on" resets and enables global-only
 * profiling, "off" disables it; anything else -> -EINVAL.
 */
static ssize_t dasd_stats_global_write(struct file *file,
                                       const char __user *user_buf,
                                       size_t user_len, loff_t *pos)
{
        char *buffer, *cmd;
        ssize_t rc;

        if (user_len > 65536)
                user_len = 65536;
        buffer = dasd_get_user_string(user_buf, user_len);
        if (IS_ERR(buffer))
                return PTR_ERR(buffer);

        cmd = skip_spaces(buffer);
        rc = user_len;
        if (strncmp(cmd, "reset", 5) == 0) {
                dasd_global_profile_reset();
        } else if (strncmp(cmd, "on", 2) == 0) {
                dasd_global_profile_reset();
                dasd_global_profile_level = DASD_PROFILE_GLOBAL_ONLY;
        } else if (strncmp(cmd, "off", 3) == 0) {
                dasd_global_profile_level = DASD_PROFILE_OFF;
        } else {
                rc = -EINVAL;
        }
        vfree(buffer);
        return rc;
}
/* seq_file show callback for the global statistics file. */
static int dasd_stats_global_show(struct seq_file *m, void *v)
{
        if (!dasd_global_profile_level) {
                seq_printf(m, "disabled\n");
                return 0;
        }
        dasd_stats_seq_print(m, &dasd_global_profile_data);
        return 0;
}
/* Open callback for the global statistics file (no private data). */
static int dasd_stats_global_open(struct inode *inode, struct file *file)
{
        return single_open(file, dasd_stats_global_show, NULL);
}
/* debugfs file operations for the global statistics file */
static const struct file_operations dasd_stats_global_fops = {
        .owner          = THIS_MODULE,
        .open           = dasd_stats_global_open,
        .read           = seq_read,
        .write          = dasd_stats_global_write,
        .llseek         = seq_lseek,
        .release        = single_release,
};
static void dasd_profile_init ( struct dasd_profile * profile ,
struct dentry * base_dentry )
{
2011-07-24 12:33:43 +04:00
umode_t mode ;
2011-07-24 12:48:32 +04:00
struct dentry * pde ;
if ( ! base_dentry )
return ;
profile - > dentry = NULL ;
profile - > data = NULL ;
mode = ( S_IRUSR | S_IWUSR | S_IFREG ) ;
pde = debugfs_create_file ( " statistics " , mode , base_dentry ,
profile , & dasd_stats_raw_fops ) ;
if ( pde & & ! IS_ERR ( pde ) )
profile - > dentry = pde ;
return ;
}
static void dasd_profile_exit ( struct dasd_profile * profile )
{
dasd_profile_off ( profile ) ;
if ( profile - > dentry ) {
debugfs_remove ( profile - > dentry ) ;
profile - > dentry = NULL ;
}
}
/*
 * Remove the global debugfs hierarchy (statistics file, "global" dir,
 * "dasd" root) and switch global profiling off. Safe to call with a
 * partially created hierarchy.
 */
static void dasd_statistics_removeroot(void)
{
        dasd_global_profile_level = DASD_PROFILE_OFF;
        if (dasd_global_profile_dentry) {
                debugfs_remove(dasd_global_profile_dentry);
                dasd_global_profile_dentry = NULL;
        }
        if (dasd_debugfs_global_entry)
                debugfs_remove(dasd_debugfs_global_entry);
        if (dasd_debugfs_root_entry)
                debugfs_remove(dasd_debugfs_root_entry);
}
static void dasd_statistics_createroot ( void )
{
2011-07-24 12:33:43 +04:00
umode_t mode ;
2011-07-24 12:48:32 +04:00
struct dentry * pde ;
dasd_debugfs_root_entry = NULL ;
dasd_debugfs_global_entry = NULL ;
dasd_global_profile_dentry = NULL ;
pde = debugfs_create_dir ( " dasd " , NULL ) ;
if ( ! pde | | IS_ERR ( pde ) )
goto error ;
dasd_debugfs_root_entry = pde ;
pde = debugfs_create_dir ( " global " , dasd_debugfs_root_entry ) ;
if ( ! pde | | IS_ERR ( pde ) )
goto error ;
dasd_debugfs_global_entry = pde ;
mode = ( S_IRUSR | S_IWUSR | S_IFREG ) ;
pde = debugfs_create_file ( " statistics " , mode , dasd_debugfs_global_entry ,
NULL , & dasd_stats_global_fops ) ;
if ( ! pde | | IS_ERR ( pde ) )
goto error ;
dasd_global_profile_dentry = pde ;
return ;
error :
DBF_EVENT ( DBF_ERR , " %s " ,
" Creation of the dasd debugfs interface failed " ) ;
dasd_statistics_removeroot ( ) ;
return ;
}
2005-04-17 02:20:36 +04:00
# else
2008-01-26 16:11:23 +03:00
/* Profiling compiled out: the start/end hooks collapse to no-ops. */
# define dasd_profile_start(block, cqr, req) do {} while (0)
# define dasd_profile_end(block, cqr, req) do {} while (0)
2011-07-24 12:48:32 +04:00
/* CONFIG_DASD_PROFILE is off: no debugfs hierarchy to create. */
static void dasd_statistics_createroot(void)
{
}
/* CONFIG_DASD_PROFILE is off: no debugfs hierarchy to remove. */
static void dasd_statistics_removeroot(void)
{
}
/* CONFIG_DASD_PROFILE is off: report that statistics are unavailable. */
int dasd_stats_generic_show(struct seq_file *m, void *v)
{
        seq_printf(m, "Statistics are not activated in this kernel\n");
        return 0;
}
/* CONFIG_DASD_PROFILE is off: profile setup is a no-op. */
static void dasd_profile_init(struct dasd_profile *profile,
                              struct dentry *base_dentry)
{
}
/* CONFIG_DASD_PROFILE is off: profile teardown is a no-op. */
static void dasd_profile_exit(struct dasd_profile *profile)
{
}
/* CONFIG_DASD_PROFILE is off: enabling always "succeeds" silently. */
int dasd_profile_on(struct dasd_profile *profile)
{
        return 0;
}
2005-04-17 02:20:36 +04:00
# endif /* CONFIG_DASD_PROFILE */
/*
 * Allocate memory for a channel program with 'cplength' channel
 * command words and 'datasize' additional space. There are two
 * variants: 1) dasd_kmalloc_request uses kmalloc to get the needed
 * memory and 2) dasd_smalloc_request uses the static ccw memory
 * that gets allocated for each device.
 */
2009-09-11 12:28:29 +04:00
struct dasd_ccw_req * dasd_kmalloc_request ( int magic , int cplength ,
2008-01-26 16:11:23 +03:00
int datasize ,
struct dasd_device * device )
2005-04-17 02:20:36 +04:00
{
struct dasd_ccw_req * cqr ;
/* Sanity checks */
2009-09-11 12:28:29 +04:00
BUG_ON ( datasize > PAGE_SIZE | |
2006-03-24 20:48:13 +03:00
( cplength * sizeof ( struct ccw1 ) ) > PAGE_SIZE ) ;
2005-04-17 02:20:36 +04:00
2006-03-24 14:15:31 +03:00
cqr = kzalloc ( sizeof ( struct dasd_ccw_req ) , GFP_ATOMIC ) ;
2005-04-17 02:20:36 +04:00
if ( cqr = = NULL )
return ERR_PTR ( - ENOMEM ) ;
cqr - > cpaddr = NULL ;
if ( cplength > 0 ) {
2006-03-24 14:15:31 +03:00
cqr - > cpaddr = kcalloc ( cplength , sizeof ( struct ccw1 ) ,
2005-04-17 02:20:36 +04:00
GFP_ATOMIC | GFP_DMA ) ;
if ( cqr - > cpaddr = = NULL ) {
kfree ( cqr ) ;
return ERR_PTR ( - ENOMEM ) ;
}
}
cqr - > data = NULL ;
if ( datasize > 0 ) {
2006-03-24 14:15:31 +03:00
cqr - > data = kzalloc ( datasize , GFP_ATOMIC | GFP_DMA ) ;
2005-04-17 02:20:36 +04:00
if ( cqr - > data = = NULL ) {
2005-11-07 12:01:30 +03:00
kfree ( cqr - > cpaddr ) ;
2005-04-17 02:20:36 +04:00
kfree ( cqr ) ;
return ERR_PTR ( - ENOMEM ) ;
}
}
2009-09-11 12:28:29 +04:00
cqr - > magic = magic ;
2005-04-17 02:20:36 +04:00
set_bit ( DASD_CQR_FLAGS_USE_ERP , & cqr - > flags ) ;
dasd_get_device ( device ) ;
return cqr ;
}
2009-09-11 12:28:29 +04:00
struct dasd_ccw_req * dasd_smalloc_request ( int magic , int cplength ,
2008-01-26 16:11:23 +03:00
int datasize ,
struct dasd_device * device )
2005-04-17 02:20:36 +04:00
{
unsigned long flags ;
struct dasd_ccw_req * cqr ;
char * data ;
int size ;
size = ( sizeof ( struct dasd_ccw_req ) + 7L ) & - 8L ;
if ( cplength > 0 )
size + = cplength * sizeof ( struct ccw1 ) ;
if ( datasize > 0 )
size + = datasize ;
spin_lock_irqsave ( & device - > mem_lock , flags ) ;
cqr = ( struct dasd_ccw_req * )
dasd_alloc_chunk ( & device - > ccw_chunks , size ) ;
spin_unlock_irqrestore ( & device - > mem_lock , flags ) ;
if ( cqr = = NULL )
return ERR_PTR ( - ENOMEM ) ;
memset ( cqr , 0 , sizeof ( struct dasd_ccw_req ) ) ;
data = ( char * ) cqr + ( ( sizeof ( struct dasd_ccw_req ) + 7L ) & - 8L ) ;
cqr - > cpaddr = NULL ;
if ( cplength > 0 ) {
cqr - > cpaddr = ( struct ccw1 * ) data ;
data + = cplength * sizeof ( struct ccw1 ) ;
memset ( cqr - > cpaddr , 0 , cplength * sizeof ( struct ccw1 ) ) ;
}
cqr - > data = NULL ;
if ( datasize > 0 ) {
cqr - > data = data ;
memset ( cqr - > data , 0 , datasize ) ;
}
2009-09-11 12:28:29 +04:00
cqr - > magic = magic ;
2005-04-17 02:20:36 +04:00
set_bit ( DASD_CQR_FLAGS_USE_ERP , & cqr - > flags ) ;
dasd_get_device ( device ) ;
return cqr ;
}
/*
* Free memory of a channel program . This function needs to free all the
* idal lists that might have been created by dasd_set_cda and the
* struct dasd_ccw_req itself .
*/
2008-01-26 16:11:23 +03:00
void dasd_kfree_request ( struct dasd_ccw_req * cqr , struct dasd_device * device )
2005-04-17 02:20:36 +04:00
{
2006-01-06 11:19:28 +03:00
# ifdef CONFIG_64BIT
2005-04-17 02:20:36 +04:00
struct ccw1 * ccw ;
/* Clear any idals used for the request. */
ccw = cqr - > cpaddr ;
do {
clear_normalized_cda ( ccw ) ;
} while ( ccw + + - > flags & ( CCW_FLAG_CC | CCW_FLAG_DC ) ) ;
# endif
2005-11-07 12:01:30 +03:00
kfree ( cqr - > cpaddr ) ;
kfree ( cqr - > data ) ;
2005-04-17 02:20:36 +04:00
kfree ( cqr ) ;
dasd_put_device ( device ) ;
}
2008-01-26 16:11:23 +03:00
void dasd_sfree_request ( struct dasd_ccw_req * cqr , struct dasd_device * device )
2005-04-17 02:20:36 +04:00
{
unsigned long flags ;
spin_lock_irqsave ( & device - > mem_lock , flags ) ;
dasd_free_chunk ( & device - > ccw_chunks , cqr ) ;
spin_unlock_irqrestore ( & device - > mem_lock , flags ) ;
dasd_put_device ( device ) ;
}
/*
* Check discipline magic in cqr .
*/
2008-01-26 16:11:23 +03:00
static inline int dasd_check_cqr ( struct dasd_ccw_req * cqr )
2005-04-17 02:20:36 +04:00
{
struct dasd_device * device ;
if ( cqr = = NULL )
return - EINVAL ;
2008-01-26 16:11:23 +03:00
device = cqr - > startdev ;
2005-04-17 02:20:36 +04:00
if ( strncmp ( ( char * ) & cqr - > magic , device - > discipline - > ebcname , 4 ) ) {
2009-03-26 17:23:49 +03:00
DBF_DEV_EVENT ( DBF_WARNING , device ,
2005-04-17 02:20:36 +04:00
" dasd_ccw_req 0x%08x magic doesn't match "
" discipline 0x%08x " ,
cqr - > magic ,
* ( unsigned int * ) device - > discipline - > name ) ;
return - EINVAL ;
}
return 0 ;
}
/*
 * Terminate the current i/o and set the request to clear_pending.
 * Timer keeps device running.
 * ccw_device_clear can fail if the i/o subsystem
 * is in a bad mood.
 */
2008-01-26 16:11:23 +03:00
int dasd_term_IO ( struct dasd_ccw_req * cqr )
2005-04-17 02:20:36 +04:00
{
struct dasd_device * device ;
int retries , rc ;
2009-03-26 17:23:49 +03:00
char errorstring [ ERRORLENGTH ] ;
2005-04-17 02:20:36 +04:00
/* Check the cqr */
rc = dasd_check_cqr ( cqr ) ;
if ( rc )
return rc ;
retries = 0 ;
2008-01-26 16:11:23 +03:00
device = ( struct dasd_device * ) cqr - > startdev ;
2005-04-17 02:20:36 +04:00
while ( ( retries < 5 ) & & ( cqr - > status = = DASD_CQR_IN_IO ) ) {
rc = ccw_device_clear ( device - > cdev , ( long ) cqr ) ;
switch ( rc ) {
case 0 : /* termination successful */
2008-01-26 16:11:23 +03:00
cqr - > status = DASD_CQR_CLEAR_PENDING ;
2005-04-17 02:20:36 +04:00
cqr - > stopclk = get_clock ( ) ;
2006-08-30 16:33:33 +04:00
cqr - > starttime = 0 ;
2005-04-17 02:20:36 +04:00
DBF_DEV_EVENT ( DBF_DEBUG , device ,
" terminate cqr %p successful " ,
cqr ) ;
break ;
case - ENODEV :
DBF_DEV_EVENT ( DBF_ERR , device , " %s " ,
" device gone, retry " ) ;
break ;
case - EIO :
DBF_DEV_EVENT ( DBF_ERR , device , " %s " ,
" I/O error, retry " ) ;
break ;
case - EINVAL :
case - EBUSY :
DBF_DEV_EVENT ( DBF_ERR , device , " %s " ,
" device busy, retry later " ) ;
break ;
default :
2009-03-26 17:23:49 +03:00
/* internal error 10 - unknown rc*/
snprintf ( errorstring , ERRORLENGTH , " 10 %d " , rc ) ;
dev_err ( & device - > cdev - > dev , " An error occurred in the "
" DASD device driver, reason=%s \n " , errorstring ) ;
2005-04-17 02:20:36 +04:00
BUG ( ) ;
break ;
}
retries + + ;
}
2008-01-26 16:11:23 +03:00
dasd_schedule_device_bh ( device ) ;
2005-04-17 02:20:36 +04:00
return rc ;
}
/*
* Start the i / o . This start_IO can fail if the channel is really busy .
* In that case set up a timer to start the request later .
*/
2008-01-26 16:11:23 +03:00
int dasd_start_IO ( struct dasd_ccw_req * cqr )
2005-04-17 02:20:36 +04:00
{
struct dasd_device * device ;
int rc ;
2009-03-26 17:23:49 +03:00
char errorstring [ ERRORLENGTH ] ;
2005-04-17 02:20:36 +04:00
/* Check the cqr */
rc = dasd_check_cqr ( cqr ) ;
2009-06-12 12:26:39 +04:00
if ( rc ) {
cqr - > intrc = rc ;
2005-04-17 02:20:36 +04:00
return rc ;
2009-06-12 12:26:39 +04:00
}
2008-01-26 16:11:23 +03:00
device = ( struct dasd_device * ) cqr - > startdev ;
2011-01-05 14:48:04 +03:00
if ( ( ( cqr - > block & &
test_bit ( DASD_FLAG_LOCK_STOLEN , & cqr - > block - > base - > flags ) ) | |
test_bit ( DASD_FLAG_LOCK_STOLEN , & device - > flags ) ) & &
! test_bit ( DASD_CQR_ALLOW_SLOCK , & cqr - > flags ) ) {
DBF_DEV_EVENT ( DBF_DEBUG , device , " start_IO: return request %p "
" because of stolen lock " , cqr ) ;
cqr - > status = DASD_CQR_ERROR ;
cqr - > intrc = - EPERM ;
return - EPERM ;
}
2005-04-17 02:20:36 +04:00
if ( cqr - > retries < 0 ) {
2009-03-26 17:23:49 +03:00
/* internal error 14 - start_IO run out of retries */
sprintf ( errorstring , " 14 %p " , cqr ) ;
dev_err ( & device - > cdev - > dev , " An error occurred in the DASD "
" device driver, reason=%s \n " , errorstring ) ;
2008-01-26 16:11:23 +03:00
cqr - > status = DASD_CQR_ERROR ;
2005-04-17 02:20:36 +04:00
return - EIO ;
}
cqr - > startclk = get_clock ( ) ;
cqr - > starttime = jiffies ;
cqr - > retries - - ;
2011-01-05 14:48:03 +03:00
if ( ! test_bit ( DASD_CQR_VERIFY_PATH , & cqr - > flags ) ) {
cqr - > lpm & = device - > path_data . opm ;
if ( ! cqr - > lpm )
cqr - > lpm = device - > path_data . opm ;
}
2009-03-26 17:23:48 +03:00
if ( cqr - > cpmode = = 1 ) {
rc = ccw_device_tm_start ( device - > cdev , cqr - > cpaddr ,
( long ) cqr , cqr - > lpm ) ;
} else {
rc = ccw_device_start ( device - > cdev , cqr - > cpaddr ,
( long ) cqr , cqr - > lpm , 0 ) ;
}
2005-04-17 02:20:36 +04:00
switch ( rc ) {
case 0 :
cqr - > status = DASD_CQR_IN_IO ;
break ;
case - EBUSY :
2011-01-05 14:48:03 +03:00
DBF_DEV_EVENT ( DBF_WARNING , device , " %s " ,
2005-04-17 02:20:36 +04:00
" start_IO: device busy, retry later " ) ;
break ;
case - ETIMEDOUT :
2011-01-05 14:48:03 +03:00
DBF_DEV_EVENT ( DBF_WARNING , device , " %s " ,
2005-04-17 02:20:36 +04:00
" start_IO: request timeout, retry later " ) ;
break ;
case - EACCES :
2011-01-05 14:48:03 +03:00
/* -EACCES indicates that the request used only a subset of the
* available paths and all these paths are gone . If the lpm of
* this request was only a subset of the opm ( e . g . the ppm ) then
* we just do a retry with all available paths .
* If we already use the full opm , something is amiss , and we
* need a full path verification .
2005-04-17 02:20:36 +04:00
*/
2011-01-05 14:48:03 +03:00
if ( test_bit ( DASD_CQR_VERIFY_PATH , & cqr - > flags ) ) {
DBF_DEV_EVENT ( DBF_WARNING , device ,
" start_IO: selected paths gone (%x) " ,
cqr - > lpm ) ;
} else if ( cqr - > lpm ! = device - > path_data . opm ) {
cqr - > lpm = device - > path_data . opm ;
DBF_DEV_EVENT ( DBF_DEBUG , device , " %s " ,
" start_IO: selected paths gone, "
" retry on all paths " ) ;
} else {
DBF_DEV_EVENT ( DBF_WARNING , device , " %s " ,
" start_IO: all paths in opm gone, "
" do path verification " ) ;
dasd_generic_last_path_gone ( device ) ;
device - > path_data . opm = 0 ;
device - > path_data . ppm = 0 ;
device - > path_data . npm = 0 ;
device - > path_data . tbvpm =
ccw_device_get_path_mask ( device - > cdev ) ;
}
2005-04-17 02:20:36 +04:00
break ;
case - ENODEV :
2011-01-05 14:48:03 +03:00
DBF_DEV_EVENT ( DBF_WARNING , device , " %s " ,
2009-03-26 17:23:48 +03:00
" start_IO: -ENODEV device gone, retry " ) ;
break ;
2005-04-17 02:20:36 +04:00
case - EIO :
2011-01-05 14:48:03 +03:00
DBF_DEV_EVENT ( DBF_WARNING , device , " %s " ,
2009-03-26 17:23:48 +03:00
" start_IO: -EIO device gone, retry " ) ;
2005-04-17 02:20:36 +04:00
break ;
2009-06-16 12:30:25 +04:00
case - EINVAL :
/* most likely caused in power management context */
2011-01-05 14:48:03 +03:00
DBF_DEV_EVENT ( DBF_WARNING , device , " %s " ,
2009-06-16 12:30:25 +04:00
" start_IO: -EINVAL device currently "
" not accessible " ) ;
break ;
2005-04-17 02:20:36 +04:00
default :
2009-03-26 17:23:49 +03:00
/* internal error 11 - unknown rc */
snprintf ( errorstring , ERRORLENGTH , " 11 %d " , rc ) ;
dev_err ( & device - > cdev - > dev ,
" An error occurred in the DASD device driver, "
" reason=%s \n " , errorstring ) ;
2005-04-17 02:20:36 +04:00
BUG ( ) ;
break ;
}
2009-06-12 12:26:39 +04:00
cqr - > intrc = rc ;
2005-04-17 02:20:36 +04:00
return rc ;
}
/*
* Timeout function for dasd devices . This is used for different purposes
* 1 ) missing interrupt handler for normal operation
* 2 ) delayed start of request where start_IO failed with - EBUSY
* 3 ) timeout for missing state change interrupts
* The head of the ccw queue will have status DASD_CQR_IN_IO for 1 ) ,
* DASD_CQR_QUEUED for 2 ) and 3 ) .
*/
2008-01-26 16:11:23 +03:00
static void dasd_device_timeout ( unsigned long ptr )
2005-04-17 02:20:36 +04:00
{
unsigned long flags ;
struct dasd_device * device ;
device = ( struct dasd_device * ) ptr ;
spin_lock_irqsave ( get_ccwdev_lock ( device - > cdev ) , flags ) ;
/* re-activate request queue */
2009-12-07 14:51:51 +03:00
dasd_device_remove_stop_bits ( device , DASD_STOPPED_PENDING ) ;
2005-04-17 02:20:36 +04:00
spin_unlock_irqrestore ( get_ccwdev_lock ( device - > cdev ) , flags ) ;
2008-01-26 16:11:23 +03:00
dasd_schedule_device_bh ( device ) ;
2005-04-17 02:20:36 +04:00
}
/*
* Setup timeout for a device in jiffies .
*/
2008-01-26 16:11:23 +03:00
void dasd_device_set_timer ( struct dasd_device * device , int expires )
2005-04-17 02:20:36 +04:00
{
2009-02-11 12:37:31 +03:00
if ( expires = = 0 )
del_timer ( & device - > timer ) ;
else
mod_timer ( & device - > timer , jiffies + expires ) ;
2005-04-17 02:20:36 +04:00
}
/*
* Clear timeout for a device .
*/
2008-01-26 16:11:23 +03:00
void dasd_device_clear_timer ( struct dasd_device * device )
2005-04-17 02:20:36 +04:00
{
2009-02-11 12:37:31 +03:00
del_timer ( & device - > timer ) ;
2005-04-17 02:20:36 +04:00
}
2008-01-26 16:11:23 +03:00
static void dasd_handle_killed_request ( struct ccw_device * cdev ,
unsigned long intparm )
2005-04-17 02:20:36 +04:00
{
struct dasd_ccw_req * cqr ;
struct dasd_device * device ;
2008-05-15 18:52:36 +04:00
if ( ! intparm )
return ;
2005-04-17 02:20:36 +04:00
cqr = ( struct dasd_ccw_req * ) intparm ;
if ( cqr - > status ! = DASD_CQR_IN_IO ) {
2009-12-07 14:51:52 +03:00
DBF_EVENT_DEVID ( DBF_DEBUG , cdev ,
" invalid status in handle_killed_request: "
" %02x " , cqr - > status ) ;
2005-04-17 02:20:36 +04:00
return ;
}
2010-02-27 00:37:47 +03:00
device = dasd_device_from_cdev_locked ( cdev ) ;
if ( IS_ERR ( device ) ) {
DBF_EVENT_DEVID ( DBF_DEBUG , cdev , " %s " ,
" unable to get device from cdev " ) ;
return ;
}
if ( ! cqr - > startdev | |
device ! = cqr - > startdev | |
strncmp ( cqr - > startdev - > discipline - > ebcname ,
( char * ) & cqr - > magic , 4 ) ) {
2010-01-27 12:12:35 +03:00
DBF_EVENT_DEVID ( DBF_DEBUG , cdev , " %s " ,
" invalid device in request " ) ;
2010-02-27 00:37:47 +03:00
dasd_put_device ( device ) ;
2005-04-17 02:20:36 +04:00
return ;
}
/* Schedule request to be retried. */
cqr - > status = DASD_CQR_QUEUED ;
2008-01-26 16:11:23 +03:00
dasd_device_clear_timer ( device ) ;
dasd_schedule_device_bh ( device ) ;
2005-04-17 02:20:36 +04:00
dasd_put_device ( device ) ;
}
2008-01-26 16:11:23 +03:00
void dasd_generic_handle_state_change ( struct dasd_device * device )
2005-04-17 02:20:36 +04:00
{
2006-03-24 14:15:25 +03:00
/* First of all start sense subsystem status request. */
dasd_eer_snss ( device ) ;
2009-12-07 14:51:51 +03:00
dasd_device_remove_stop_bits ( device , DASD_STOPPED_PENDING ) ;
2008-01-26 16:11:23 +03:00
dasd_schedule_device_bh ( device ) ;
if ( device - > block )
dasd_schedule_block_bh ( device - > block ) ;
2005-04-17 02:20:36 +04:00
}
/*
* Interrupt handler for " normal " ssch - io based dasd devices .
*/
2008-01-26 16:11:23 +03:00
void dasd_int_handler ( struct ccw_device * cdev , unsigned long intparm ,
struct irb * irb )
2005-04-17 02:20:36 +04:00
{
struct dasd_ccw_req * cqr , * next ;
struct dasd_device * device ;
unsigned long long now ;
int expires ;
if ( IS_ERR ( irb ) ) {
switch ( PTR_ERR ( irb ) ) {
case - EIO :
break ;
case - ETIMEDOUT :
2009-12-07 14:51:52 +03:00
DBF_EVENT_DEVID ( DBF_WARNING , cdev , " %s: "
" request timed out \n " , __func__ ) ;
2005-04-17 02:20:36 +04:00
break ;
default :
2009-12-07 14:51:52 +03:00
DBF_EVENT_DEVID ( DBF_WARNING , cdev , " %s: "
" unknown error %ld \n " , __func__ ,
PTR_ERR ( irb ) ) ;
2005-04-17 02:20:36 +04:00
}
2008-05-15 18:52:36 +04:00
dasd_handle_killed_request ( cdev , intparm ) ;
2005-04-17 02:20:36 +04:00
return ;
}
now = get_clock ( ) ;
2008-01-26 16:11:23 +03:00
cqr = ( struct dasd_ccw_req * ) intparm ;
2011-01-05 14:48:04 +03:00
/* check for conditions that should be handled immediately */
if ( ! cqr | |
! ( scsw_dstat ( & irb - > scsw ) = = ( DEV_STAT_CHN_END | DEV_STAT_DEV_END ) & &
scsw_cstat ( & irb - > scsw ) = = 0 ) ) {
2010-10-25 18:10:47 +04:00
if ( cqr )
memcpy ( & cqr - > irb , irb , sizeof ( * irb ) ) ;
2006-09-20 17:59:05 +04:00
device = dasd_device_from_cdev_locked ( cdev ) ;
2010-10-25 18:10:49 +04:00
if ( IS_ERR ( device ) )
return ;
/* ignore unsolicited interrupts for DIAG discipline */
if ( device - > discipline = = dasd_diag_discipline_pointer ) {
2005-04-17 02:20:36 +04:00
dasd_put_device ( device ) ;
2010-10-25 18:10:49 +04:00
return ;
2005-04-17 02:20:36 +04:00
}
2011-01-05 14:48:04 +03:00
device - > discipline - > dump_sense_dbf ( device , irb , " int " ) ;
if ( device - > features & DASD_FEATURE_ERPLOG )
device - > discipline - > dump_sense ( device , cqr , irb ) ;
device - > discipline - > check_for_device_change ( device , cqr , irb ) ;
2010-10-25 18:10:49 +04:00
dasd_put_device ( device ) ;
2005-04-17 02:20:36 +04:00
}
2011-01-05 14:48:04 +03:00
if ( ! cqr )
return ;
2005-04-17 02:20:36 +04:00
2008-01-26 16:11:23 +03:00
device = ( struct dasd_device * ) cqr - > startdev ;
if ( ! device | |
2005-04-17 02:20:36 +04:00
strncmp ( device - > discipline - > ebcname , ( char * ) & cqr - > magic , 4 ) ) {
2010-01-27 12:12:35 +03:00
DBF_EVENT_DEVID ( DBF_DEBUG , cdev , " %s " ,
" invalid device in request " ) ;
2005-04-17 02:20:36 +04:00
return ;
}
/* Check for clear pending */
2008-01-26 16:11:23 +03:00
if ( cqr - > status = = DASD_CQR_CLEAR_PENDING & &
2009-03-26 17:23:48 +03:00
scsw_fctl ( & irb - > scsw ) & SCSW_FCTL_CLEAR_FUNC ) {
2008-01-26 16:11:23 +03:00
cqr - > status = DASD_CQR_CLEARED ;
dasd_device_clear_timer ( device ) ;
2006-08-30 16:33:33 +04:00
wake_up ( & dasd_flush_wq ) ;
2008-01-26 16:11:23 +03:00
dasd_schedule_device_bh ( device ) ;
2005-04-17 02:20:36 +04:00
return ;
}
2009-03-26 17:23:48 +03:00
/* check status - the request might have been killed by dyn detach */
2005-04-17 02:20:36 +04:00
if ( cqr - > status ! = DASD_CQR_IN_IO ) {
2009-03-26 17:23:49 +03:00
DBF_DEV_EVENT ( DBF_DEBUG , device , " invalid status: bus_id %s, "
" status %02x " , dev_name ( & cdev - > dev ) , cqr - > status ) ;
2005-04-17 02:20:36 +04:00
return ;
}
2009-03-26 17:23:49 +03:00
2008-01-26 16:11:23 +03:00
next = NULL ;
2005-04-17 02:20:36 +04:00
expires = 0 ;
2009-03-26 17:23:48 +03:00
if ( scsw_dstat ( & irb - > scsw ) = = ( DEV_STAT_CHN_END | DEV_STAT_DEV_END ) & &
scsw_cstat ( & irb - > scsw ) = = 0 ) {
2008-01-26 16:11:23 +03:00
/* request was completed successfully */
cqr - > status = DASD_CQR_SUCCESS ;
2005-04-17 02:20:36 +04:00
cqr - > stopclk = now ;
/* Start first request on queue if possible -> fast_io. */
2008-01-26 16:11:23 +03:00
if ( cqr - > devlist . next ! = & device - > ccw_queue ) {
next = list_entry ( cqr - > devlist . next ,
struct dasd_ccw_req , devlist ) ;
2005-04-17 02:20:36 +04:00
}
2008-01-26 16:11:23 +03:00
} else { /* error */
2008-02-05 18:50:46 +03:00
/*
* If we don ' t want complex ERP for this request , then just
* reset this and retry it in the fastpath
2008-01-26 16:11:23 +03:00
*/
2008-02-05 18:50:46 +03:00
if ( ! test_bit ( DASD_CQR_FLAGS_USE_ERP , & cqr - > flags ) & &
2008-01-26 16:11:23 +03:00
cqr - > retries > 0 ) {
2011-01-05 14:48:03 +03:00
if ( cqr - > lpm = = device - > path_data . opm )
2009-03-26 17:23:49 +03:00
DBF_DEV_EVENT ( DBF_DEBUG , device ,
" default ERP in fastpath "
" (%i retries left) " ,
cqr - > retries ) ;
2011-01-05 14:48:03 +03:00
if ( ! test_bit ( DASD_CQR_VERIFY_PATH , & cqr - > flags ) )
cqr - > lpm = device - > path_data . opm ;
2008-01-26 16:11:23 +03:00
cqr - > status = DASD_CQR_QUEUED ;
next = cqr ;
} else
2005-04-17 02:20:36 +04:00
cqr - > status = DASD_CQR_ERROR ;
2008-01-26 16:11:23 +03:00
}
if ( next & & ( next - > status = = DASD_CQR_QUEUED ) & &
( ! device - > stopped ) ) {
if ( device - > discipline - > start_IO ( next ) = = 0 )
expires = next - > expires ;
2005-04-17 02:20:36 +04:00
}
if ( expires ! = 0 )
2008-01-26 16:11:23 +03:00
dasd_device_set_timer ( device , expires ) ;
2005-04-17 02:20:36 +04:00
else
2008-01-26 16:11:23 +03:00
dasd_device_clear_timer ( device ) ;
dasd_schedule_device_bh ( device ) ;
2005-04-17 02:20:36 +04:00
}
2010-05-27 01:27:09 +04:00
enum uc_todo dasd_generic_uc_handler ( struct ccw_device * cdev , struct irb * irb )
{
struct dasd_device * device ;
device = dasd_device_from_cdev_locked ( cdev ) ;
if ( IS_ERR ( device ) )
goto out ;
if ( test_bit ( DASD_FLAG_OFFLINE , & device - > flags ) | |
device - > state ! = device - > target | |
2011-01-05 14:48:04 +03:00
! device - > discipline - > check_for_device_change ) {
2010-05-27 01:27:09 +04:00
dasd_put_device ( device ) ;
goto out ;
}
2011-01-05 14:48:04 +03:00
if ( device - > discipline - > dump_sense_dbf )
device - > discipline - > dump_sense_dbf ( device , irb , " uc " ) ;
device - > discipline - > check_for_device_change ( device , NULL , irb ) ;
2010-05-27 01:27:09 +04:00
dasd_put_device ( device ) ;
out :
return UC_TODO_RETRY ;
}
EXPORT_SYMBOL_GPL ( dasd_generic_uc_handler ) ;
2005-04-17 02:20:36 +04:00
/*
2008-01-26 16:11:23 +03:00
* If we have an error on a dasd_block layer request then we cancel
* and return all further requests from the same dasd_block as well .
2005-04-17 02:20:36 +04:00
*/
2008-01-26 16:11:23 +03:00
static void __dasd_device_recovery ( struct dasd_device * device ,
struct dasd_ccw_req * ref_cqr )
2005-04-17 02:20:36 +04:00
{
2008-01-26 16:11:23 +03:00
struct list_head * l , * n ;
struct dasd_ccw_req * cqr ;
2005-04-17 02:20:36 +04:00
2008-01-26 16:11:23 +03:00
/*
* only requeue request that came from the dasd_block layer
*/
if ( ! ref_cqr - > block )
return ;
2005-04-17 02:20:36 +04:00
2008-01-26 16:11:23 +03:00
list_for_each_safe ( l , n , & device - > ccw_queue ) {
cqr = list_entry ( l , struct dasd_ccw_req , devlist ) ;
if ( cqr - > status = = DASD_CQR_QUEUED & &
ref_cqr - > block = = cqr - > block ) {
cqr - > status = DASD_CQR_CLEARED ;
}
}
} ;
2005-04-17 02:20:36 +04:00
/*
2008-01-26 16:11:23 +03:00
* Remove those ccw requests from the queue that need to be returned
* to the upper layer .
2005-04-17 02:20:36 +04:00
*/
2008-01-26 16:11:23 +03:00
static void __dasd_device_process_ccw_queue ( struct dasd_device * device ,
struct list_head * final_queue )
2005-04-17 02:20:36 +04:00
{
struct list_head * l , * n ;
struct dasd_ccw_req * cqr ;
/* Process request with final status. */
list_for_each_safe ( l , n , & device - > ccw_queue ) {
2008-01-26 16:11:23 +03:00
cqr = list_entry ( l , struct dasd_ccw_req , devlist ) ;
2005-04-17 02:20:36 +04:00
/* Stop list processing at the first non-final request. */
2008-01-26 16:11:23 +03:00
if ( cqr - > status = = DASD_CQR_QUEUED | |
cqr - > status = = DASD_CQR_IN_IO | |
cqr - > status = = DASD_CQR_CLEAR_PENDING )
2005-04-17 02:20:36 +04:00
break ;
if ( cqr - > status = = DASD_CQR_ERROR ) {
2008-01-26 16:11:23 +03:00
__dasd_device_recovery ( device , cqr ) ;
2006-03-24 14:15:25 +03:00
}
2005-04-17 02:20:36 +04:00
/* Rechain finished requests to final queue */
2008-01-26 16:11:23 +03:00
list_move_tail ( & cqr - > devlist , final_queue ) ;
2005-04-17 02:20:36 +04:00
}
}
/*
2008-01-26 16:11:23 +03:00
* the cqrs from the final queue are returned to the upper layer
* by setting a dasd_block state and calling the callback function
2005-04-17 02:20:36 +04:00
*/
2008-01-26 16:11:23 +03:00
static void __dasd_device_process_final_queue ( struct dasd_device * device ,
struct list_head * final_queue )
2005-04-17 02:20:36 +04:00
{
2008-01-26 16:11:23 +03:00
struct list_head * l , * n ;
2005-04-17 02:20:36 +04:00
struct dasd_ccw_req * cqr ;
2008-02-19 17:29:27 +03:00
struct dasd_block * block ;
2008-05-30 12:03:31 +04:00
void ( * callback ) ( struct dasd_ccw_req * , void * data ) ;
void * callback_data ;
2009-03-26 17:23:49 +03:00
char errorstring [ ERRORLENGTH ] ;
2005-05-01 19:58:59 +04:00
2008-01-26 16:11:23 +03:00
list_for_each_safe ( l , n , final_queue ) {
cqr = list_entry ( l , struct dasd_ccw_req , devlist ) ;
list_del_init ( & cqr - > devlist ) ;
2008-02-19 17:29:27 +03:00
block = cqr - > block ;
2008-05-30 12:03:31 +04:00
callback = cqr - > callback ;
callback_data = cqr - > callback_data ;
2008-02-19 17:29:27 +03:00
if ( block )
spin_lock_bh ( & block - > queue_lock ) ;
2008-01-26 16:11:23 +03:00
switch ( cqr - > status ) {
case DASD_CQR_SUCCESS :
cqr - > status = DASD_CQR_DONE ;
break ;
case DASD_CQR_ERROR :
cqr - > status = DASD_CQR_NEED_ERP ;
break ;
case DASD_CQR_CLEARED :
cqr - > status = DASD_CQR_TERMINATED ;
break ;
default :
2009-03-26 17:23:49 +03:00
/* internal error 12 - wrong cqr status*/
snprintf ( errorstring , ERRORLENGTH , " 12 %p %x02 " , cqr , cqr - > status ) ;
dev_err ( & device - > cdev - > dev ,
" An error occurred in the DASD device driver, "
" reason=%s \n " , errorstring ) ;
2008-01-26 16:11:23 +03:00
BUG ( ) ;
2005-04-17 02:20:36 +04:00
}
2008-01-26 16:11:23 +03:00
if ( cqr - > callback ! = NULL )
2008-05-30 12:03:31 +04:00
( callback ) ( cqr , callback_data ) ;
2008-02-19 17:29:27 +03:00
if ( block )
spin_unlock_bh ( & block - > queue_lock ) ;
2005-04-17 02:20:36 +04:00
}
}
/*
 * Take a look at the first request on the ccw queue and check
 * if it reached its expire time. If so, terminate the IO.
 */
static void __dasd_device_check_expire(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;

	if (list_empty(&device->ccw_queue))
		return;
	/* Only the head of the queue can be actively in I/O. */
	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
	/* expires == 0 means "no timeout configured" for this request */
	if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) &&
	    (time_after_eq(jiffies, cqr->expires + cqr->starttime))) {
		if (device->discipline->term_IO(cqr) != 0) {
			/* Hmpf, try again in 5 sec */
			dev_err(&device->cdev->dev,
				"cqr %p timed out (%lus) but cannot be "
				"ended, retrying in 5 s\n",
				cqr, (cqr->expires/HZ));
			/* push the deadline out so we do not re-terminate
			 * immediately on the next tasklet run */
			cqr->expires += 5*HZ;
			dasd_device_set_timer(device, 5*HZ);
		} else {
			dev_err(&device->cdev->dev,
				"cqr %p timed out (%lus), %i retries "
				"remaining\n", cqr, (cqr->expires/HZ),
				cqr->retries);
		}
	}
}
/*
 * Take a look at the first request on the ccw queue and check
 * if it needs to be started.
 */
static void __dasd_device_start_head(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	int rc;

	if (list_empty(&device->ccw_queue))
		return;
	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
	if (cqr->status != DASD_CQR_QUEUED)
		return;
	/* when device is stopped, return request to previous layer
	 * exception: only the disconnect or unresumed bits are set and the
	 * cqr is a path verification request
	 */
	if (device->stopped &&
	    !(!(device->stopped & ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM))
	      && test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))) {
		/* hand the request back; -EAGAIN tells the submitter that
		 * it was never started */
		cqr->intrc = -EAGAIN;
		cqr->status = DASD_CQR_CLEARED;
		dasd_schedule_device_bh(device);
		return;
	}
	rc = device->discipline->start_IO(cqr);
	if (rc == 0)
		/* arm the expiry timer for the started request */
		dasd_device_set_timer(device, cqr->expires);
	else if (rc == -EACCES) {
		/* request was rejected; let the tasklet sort it out */
		dasd_schedule_device_bh(device);
	} else
		/* Hmpf, try again in 1/2 sec */
		dasd_device_set_timer(device, 50);
}
2011-01-05 14:48:03 +03:00
static void __dasd_device_check_path_events ( struct dasd_device * device )
{
int rc ;
if ( device - > path_data . tbvpm ) {
if ( device - > stopped & ~ ( DASD_STOPPED_DC_WAIT |
DASD_UNRESUMED_PM ) )
return ;
rc = device - > discipline - > verify_path (
device , device - > path_data . tbvpm ) ;
if ( rc )
dasd_device_set_timer ( device , 50 ) ;
else
device - > path_data . tbvpm = 0 ;
}
} ;
2005-04-17 02:20:36 +04:00
/*
 * Go through all request on the dasd_device request queue,
 * terminate them on the cdev if necessary, and return them to the
 * submitting layer via callback.
 * Note:
 * Make sure that all 'submitting layers' still exist when
 * this function is called!. In other words, when 'device' is a base
 * device then all block layer requests must have been removed before
 * via dasd_flush_block_queue.
 */
int dasd_flush_device_queue(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr, *n;
	int rc;
	struct list_head flush_queue;

	INIT_LIST_HEAD(&flush_queue);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = 0;
	list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
		/* Check status and move request to flush_queue */
		switch (cqr->status) {
		case DASD_CQR_IN_IO:
			/* active request: ask the discipline to end it */
			rc = device->discipline->term_IO(cqr);
			if (rc) {
				/* unable to terminate requeust */
				dev_err(&device->cdev->dev,
					"Flushing the DASD request queue "
					"failed for request %p\n", cqr);
				/* stop flush processing */
				goto finished;
			}
			break;
		case DASD_CQR_QUEUED:
			/* never started: mark as cleared directly */
			cqr->stopclk = get_clock();
			cqr->status = DASD_CQR_CLEARED;
			break;
		default: /* no need to modify the others */
			break;
		}
		list_move_tail(&cqr->devlist, &flush_queue);
	}
finished:
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	/*
	 * After this point all requests must be in state CLEAR_PENDING,
	 * CLEARED, SUCCESS or ERROR. Now wait for CLEAR_PENDING to become
	 * one of the others.
	 */
	list_for_each_entry_safe(cqr, n, &flush_queue, devlist)
		wait_event(dasd_flush_wq,
			   (cqr->status != DASD_CQR_CLEAR_PENDING));
	/*
	 * Now set each request back to TERMINATED, DONE or NEED_ERP
	 * and call the callback function of flushed requests
	 */
	__dasd_device_process_final_queue(device, &flush_queue);
	return rc;
}
/*
 * Acquire the device lock and process queues for the device.
 * Runs as the device tasklet; scheduled via dasd_schedule_device_bh().
 */
static void dasd_device_tasklet(struct dasd_device *device)
{
	struct list_head final_queue;

	/* allow the tasklet to be scheduled again */
	atomic_set(&device->tasklet_scheduled, 0);
	INIT_LIST_HEAD(&final_queue);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* Check expire time of first request on the ccw queue. */
	__dasd_device_check_expire(device);
	/* find final requests on ccw queue */
	__dasd_device_process_ccw_queue(device, &final_queue);
	__dasd_device_check_path_events(device);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	/* Now call the callback function of requests with final status */
	/* (done without the ccwdev lock; callbacks may take other locks) */
	__dasd_device_process_final_queue(device, &final_queue);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* Now check if the head of the ccw queue needs to be started. */
	__dasd_device_start_head(device);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	/* balances the dasd_get_device() in dasd_schedule_device_bh() */
	dasd_put_device(device);
}
/*
* Schedules a call to dasd_tasklet over the device tasklet .
*/
2008-01-26 16:11:23 +03:00
void dasd_schedule_device_bh ( struct dasd_device * device )
2005-04-17 02:20:36 +04:00
{
/* Protect against rescheduling. */
2006-01-06 11:19:07 +03:00
if ( atomic_cmpxchg ( & device - > tasklet_scheduled , 0 , 1 ) ! = 0 )
2005-04-17 02:20:36 +04:00
return ;
dasd_get_device ( device ) ;
tasklet_hi_schedule ( & device - > tasklet ) ;
}
2009-12-07 14:51:51 +03:00
void dasd_device_set_stop_bits ( struct dasd_device * device , int bits )
{
device - > stopped | = bits ;
}
EXPORT_SYMBOL_GPL ( dasd_device_set_stop_bits ) ;
void dasd_device_remove_stop_bits ( struct dasd_device * device , int bits )
{
device - > stopped & = ~ bits ;
if ( ! device - > stopped )
wake_up ( & generic_waitq ) ;
}
EXPORT_SYMBOL_GPL ( dasd_device_remove_stop_bits ) ;
2005-04-17 02:20:36 +04:00
/*
2008-01-26 16:11:23 +03:00
* Queue a request to the head of the device ccw_queue .
* Start the I / O if possible .
2005-04-17 02:20:36 +04:00
*/
2008-01-26 16:11:23 +03:00
void dasd_add_request_head ( struct dasd_ccw_req * cqr )
2005-04-17 02:20:36 +04:00
{
struct dasd_device * device ;
unsigned long flags ;
2008-01-26 16:11:23 +03:00
device = cqr - > startdev ;
2005-04-17 02:20:36 +04:00
spin_lock_irqsave ( get_ccwdev_lock ( device - > cdev ) , flags ) ;
2008-01-26 16:11:23 +03:00
cqr - > status = DASD_CQR_QUEUED ;
list_add ( & cqr - > devlist , & device - > ccw_queue ) ;
2005-04-17 02:20:36 +04:00
/* let the bh start the request to keep them in order */
2008-01-26 16:11:23 +03:00
dasd_schedule_device_bh ( device ) ;
2005-04-17 02:20:36 +04:00
spin_unlock_irqrestore ( get_ccwdev_lock ( device - > cdev ) , flags ) ;
}
/*
2008-01-26 16:11:23 +03:00
* Queue a request to the tail of the device ccw_queue .
* Start the I / O if possible .
2005-04-17 02:20:36 +04:00
*/
2008-01-26 16:11:23 +03:00
void dasd_add_request_tail ( struct dasd_ccw_req * cqr )
2005-04-17 02:20:36 +04:00
{
struct dasd_device * device ;
unsigned long flags ;
2008-01-26 16:11:23 +03:00
device = cqr - > startdev ;
2005-04-17 02:20:36 +04:00
spin_lock_irqsave ( get_ccwdev_lock ( device - > cdev ) , flags ) ;
2008-01-26 16:11:23 +03:00
cqr - > status = DASD_CQR_QUEUED ;
list_add_tail ( & cqr - > devlist , & device - > ccw_queue ) ;
2005-04-17 02:20:36 +04:00
/* let the bh start the request to keep them in order */
2008-01-26 16:11:23 +03:00
dasd_schedule_device_bh ( device ) ;
2005-04-17 02:20:36 +04:00
spin_unlock_irqrestore ( get_ccwdev_lock ( device - > cdev ) , flags ) ;
}
/*
2008-01-26 16:11:23 +03:00
* Wakeup helper for the ' sleep_on ' functions .
2005-04-17 02:20:36 +04:00
*/
2011-10-30 18:16:57 +04:00
void dasd_wakeup_cb ( struct dasd_ccw_req * cqr , void * data )
2005-04-17 02:20:36 +04:00
{
2010-05-12 11:32:11 +04:00
spin_lock_irq ( get_ccwdev_lock ( cqr - > startdev - > cdev ) ) ;
cqr - > callback_data = DASD_SLEEPON_END_TAG ;
spin_unlock_irq ( get_ccwdev_lock ( cqr - > startdev - > cdev ) ) ;
wake_up ( & generic_waitq ) ;
2005-04-17 02:20:36 +04:00
}
2011-10-30 18:16:57 +04:00
EXPORT_SYMBOL_GPL ( dasd_wakeup_cb ) ;
2005-04-17 02:20:36 +04:00
2008-01-26 16:11:23 +03:00
static inline int _wait_for_wakeup ( struct dasd_ccw_req * cqr )
2005-04-17 02:20:36 +04:00
{
struct dasd_device * device ;
int rc ;
2008-01-26 16:11:23 +03:00
device = cqr - > startdev ;
2005-04-17 02:20:36 +04:00
spin_lock_irq ( get_ccwdev_lock ( device - > cdev ) ) ;
2010-05-12 11:32:11 +04:00
rc = ( cqr - > callback_data = = DASD_SLEEPON_END_TAG ) ;
2005-04-17 02:20:36 +04:00
spin_unlock_irq ( get_ccwdev_lock ( device - > cdev ) ) ;
return rc ;
}
/*
 * checks if error recovery is necessary, returns 1 if yes, 0 otherwise.
 * A return of 1 means an ERP step was triggered and the caller must
 * re-evaluate the request.
 */
static int __dasd_sleep_on_erp(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	dasd_erp_fn_t erp_fn;

	/* freshly (re)filled requests need no recovery */
	if (cqr->status == DASD_CQR_FILLED)
		return 0;
	device = cqr->startdev;
	if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) {
		if (cqr->status == DASD_CQR_TERMINATED) {
			device->discipline->handle_terminated_request(cqr);
			return 1;
		}
		if (cqr->status == DASD_CQR_NEED_ERP) {
			/* start the discipline's error recovery action */
			erp_fn = device->discipline->erp_action(cqr);
			erp_fn(cqr);
			return 1;
		}
		if (cqr->status == DASD_CQR_FAILED)
			dasd_log_sense(cqr, &cqr->irb);
		if (cqr->refers) {
			/* finished ERP request: propagate its result to
			 * the request it refers to */
			__dasd_process_erp(device, cqr);
			return 1;
		}
	}
	return 0;
}
2006-06-29 16:58:12 +04:00
2009-12-07 14:51:51 +03:00
static int __dasd_sleep_on_loop_condition ( struct dasd_ccw_req * cqr )
{
if ( test_bit ( DASD_CQR_FLAGS_USE_ERP , & cqr - > flags ) ) {
if ( cqr - > refers ) /* erp is not done yet */
return 1 ;
return ( ( cqr - > status ! = DASD_CQR_DONE ) & &
( cqr - > status ! = DASD_CQR_FAILED ) ) ;
} else
return ( cqr - > status = = DASD_CQR_FILLED ) ;
}
2006-06-29 16:58:12 +04:00
2009-12-07 14:51:51 +03:00
/*
 * Submit @maincqr and wait (interruptibly if requested) until it and
 * any ERP requests it spawns have reached a final state. Returns 0 on
 * success, the request's intrc, or -EIO.
 */
static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible)
{
	struct dasd_device *device;
	int rc;
	struct list_head ccw_queue;
	struct dasd_ccw_req *cqr;

	INIT_LIST_HEAD(&ccw_queue);
	maincqr->status = DASD_CQR_FILLED;
	device = maincqr->startdev;
	list_add(&maincqr->blocklist, &ccw_queue);
	/* iterate until the request chain reaches a final state; ERP may
	 * prepend new requests to ccw_queue */
	for (cqr = maincqr; __dasd_sleep_on_loop_condition(cqr);
	     cqr = list_first_entry(&ccw_queue,
				    struct dasd_ccw_req, blocklist)) {

		if (__dasd_sleep_on_erp(cqr))
			continue;
		if (cqr->status != DASD_CQR_FILLED) /* could be failed */
			continue;
		/* reject if the device lock was stolen and this request is
		 * not allowed to issue a steal-lock itself */
		if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) &&
		    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
			cqr->status = DASD_CQR_FAILED;
			cqr->intrc = -EPERM;
			continue;
		}
		/* Non-temporary stop condition will trigger fail fast */
		if (device->stopped & ~DASD_STOPPED_PENDING &&
		    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
		    (!dasd_eer_enabled(device))) {
			cqr->status = DASD_CQR_FAILED;
			continue;
		}
		/* Don't try to start requests if device is stopped */
		if (interruptible) {
			rc = wait_event_interruptible(
				generic_waitq, !(device->stopped));
			if (rc == -ERESTARTSYS) {
				cqr->status = DASD_CQR_FAILED;
				maincqr->intrc = rc;
				continue;
			}
		} else
			wait_event(generic_waitq, !(device->stopped));

		if (!cqr->callback)
			cqr->callback = dasd_wakeup_cb;

		/* mark as submitted; dasd_wakeup_cb sets the END tag */
		cqr->callback_data = DASD_SLEEPON_START_TAG;
		dasd_add_request_tail(cqr);
		if (interruptible) {
			rc = wait_event_interruptible(
				generic_waitq, _wait_for_wakeup(cqr));
			if (rc == -ERESTARTSYS) {
				dasd_cancel_req(cqr);
				/* wait (non-interruptible) for final status */
				wait_event(generic_waitq,
					   _wait_for_wakeup(cqr));
				cqr->status = DASD_CQR_FAILED;
				maincqr->intrc = rc;
				continue;
			}
		} else
			wait_event(generic_waitq, _wait_for_wakeup(cqr));
	}

	maincqr->endclk = get_clock();
	/* do not log sense for an interrupted (ERESTARTSYS) request */
	if ((maincqr->status != DASD_CQR_DONE) &&
	    (maincqr->intrc != -ERESTARTSYS))
		dasd_log_sense(maincqr, &maincqr->irb);
	if (maincqr->status == DASD_CQR_DONE)
		rc = 0;
	else if (maincqr->intrc)
		rc = maincqr->intrc;
	else
		rc = -EIO;
	return rc;
}
2009-12-07 14:51:51 +03:00
/*
 * Queue a request to the tail of the device ccw_queue and wait for
 * its completion (non-interruptible).
 */
int dasd_sleep_on(struct dasd_ccw_req *cqr)
{
	return _dasd_sleep_on(cqr, /* interruptible = */ 0);
}
2005-04-17 02:20:36 +04:00
/*
 * Queue a request to the tail of the device ccw_queue and wait
 * interruptibly for its completion.
 */
int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr)
{
	return _dasd_sleep_on(cqr, /* interruptible = */ 1);
}
/*
 * Whoa nelly now it gets really hairy. For some functions (e.g. steal lock
 * for eckd devices) the currently running request has to be terminated
 * and be put back to status queued, before the special request is added
 * to the head of the queue. Then the special request is waited on normally.
 */
static inline int _dasd_term_running_cqr(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	int rc;

	if (list_empty(&device->ccw_queue))
		return 0;
	/* only the head of the queue can be running */
	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
	rc = device->discipline->term_IO(cqr);
	if (!rc)
		/*
		 * CQR terminated because a more important request is pending.
		 * Undo decreasing of retry counter because this is
		 * not an error case.
		 */
		cqr->retries++;
	return rc;
}
2008-01-26 16:11:23 +03:00
/*
 * Terminate the currently running request (if any), queue @cqr right
 * behind it and wait for completion. Used for urgent requests (e.g.
 * steal lock) that must preempt normal I/O.
 */
int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int rc;

	device = cqr->startdev;
	/* refuse when the lock was stolen and this cqr may not steal it */
	if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) &&
	    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
		cqr->status = DASD_CQR_FAILED;
		cqr->intrc = -EPERM;
		return -EIO;
	}
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = _dasd_term_running_cqr(device);
	if (rc) {
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		return rc;
	}
	cqr->callback = dasd_wakeup_cb;
	cqr->callback_data = DASD_SLEEPON_START_TAG;
	cqr->status = DASD_CQR_QUEUED;
	/*
	 * add new request as second
	 * first the terminated cqr needs to be finished
	 */
	list_add(&cqr->devlist, device->ccw_queue.next);

	/* let the bh start the request to keep them in order */
	dasd_schedule_device_bh(device);

	spin_unlock_irq(get_ccwdev_lock(device->cdev));

	wait_event(generic_waitq, _wait_for_wakeup(cqr));

	if (cqr->status == DASD_CQR_DONE)
		rc = 0;
	else if (cqr->intrc)
		rc = cqr->intrc;
	else
		rc = -EIO;
	return rc;
}
/*
 * Cancels a request that was started with dasd_sleep_on_req.
 * This is useful to timeout requests. The request will be
 * terminated if it is currently in i/o.
 * Returns 1 if the request has been terminated.
 *	   0 if there was no need to terminate the request (not started yet)
 *	   negative error code if termination failed
 * Cancellation of a request is an asynchronous operation! The calling
 * function has to wait until the request is properly returned via callback.
 */
int dasd_cancel_req(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device = cqr->startdev;
	unsigned long flags;
	int rc;

	rc = 0;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	switch (cqr->status) {
	case DASD_CQR_QUEUED:
		/* request was not started - just set to cleared */
		cqr->status = DASD_CQR_CLEARED;
		break;
	case DASD_CQR_IN_IO:
		/* request in IO - terminate IO and release again */
		rc = device->discipline->term_IO(cqr);
		if (rc) {
			dev_err(&device->cdev->dev,
				"Cancelling request %p failed with rc=%d\n",
				cqr, rc);
		} else {
			cqr->stopclk = get_clock();
		}
		break;
	default: /* already finished or clear pending - do nothing */
		break;
	}
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	/* let the tasklet return the cancelled request via callback */
	dasd_schedule_device_bh(device);
	return rc;
}
/*
* SECTION : Operations of the dasd_block layer .
*/
/*
* Timeout function for dasd_block . This is used when the block layer
* is waiting for something that may not come reliably , ( e . g . a state
* change interrupt )
*/
static void dasd_block_timeout ( unsigned long ptr )
{
unsigned long flags ;
struct dasd_block * block ;
block = ( struct dasd_block * ) ptr ;
spin_lock_irqsave ( get_ccwdev_lock ( block - > base - > cdev ) , flags ) ;
/* re-activate request queue */
2009-12-07 14:51:51 +03:00
dasd_device_remove_stop_bits ( block - > base , DASD_STOPPED_PENDING ) ;
2008-01-26 16:11:23 +03:00
spin_unlock_irqrestore ( get_ccwdev_lock ( block - > base - > cdev ) , flags ) ;
dasd_schedule_block_bh ( block ) ;
}
/*
* Setup timeout for a dasd_block in jiffies .
*/
void dasd_block_set_timer ( struct dasd_block * block , int expires )
{
2009-02-11 12:37:31 +03:00
if ( expires = = 0 )
del_timer ( & block - > timer ) ;
else
mod_timer ( & block - > timer , jiffies + expires ) ;
2008-01-26 16:11:23 +03:00
}
/*
* Clear timeout for a dasd_block .
*/
void dasd_block_clear_timer ( struct dasd_block * block )
{
2009-02-11 12:37:31 +03:00
del_timer ( & block - > timer ) ;
2008-01-26 16:11:23 +03:00
}
/*
* Process finished error recovery ccw .
*/
2009-12-07 14:51:51 +03:00
static void __dasd_process_erp ( struct dasd_device * device ,
struct dasd_ccw_req * cqr )
2008-01-26 16:11:23 +03:00
{
dasd_erp_fn_t erp_fn ;
if ( cqr - > status = = DASD_CQR_DONE )
DBF_DEV_EVENT ( DBF_NOTICE , device , " %s " , " ERP successful " ) ;
else
2009-03-26 17:23:49 +03:00
dev_err ( & device - > cdev - > dev , " ERP failed for the DASD \n " ) ;
2008-01-26 16:11:23 +03:00
erp_fn = device - > discipline - > erp_postaction ( cqr ) ;
erp_fn ( cqr ) ;
}
2005-04-17 02:20:36 +04:00
2008-01-26 16:11:23 +03:00
/*
 * Fetch requests from the block device queue, build ccw requests for
 * them and chain those onto the block's ccw_queue.
 */
static void __dasd_process_request_queue(struct dasd_block *block)
{
	struct request_queue *queue;
	struct request *req;
	struct dasd_ccw_req *cqr;
	struct dasd_device *basedev;
	unsigned long flags;

	queue = block->request_queue;
	basedev = block->base;
	/* No queue ? Then there is nothing to do. */
	if (queue == NULL)
		return;
	/*
	 * We requeue request from the block device queue to the ccw
	 * queue only in two states. In state DASD_STATE_READY the
	 * partition detection is done and we need to requeue requests
	 * for that. State DASD_STATE_ONLINE is normal block device
	 * operation.
	 */
	if (basedev->state < DASD_STATE_READY) {
		/* device not ready: fail all pending block requests */
		while ((req = blk_fetch_request(block->request_queue)))
			__blk_end_request_all(req, -EIO);
		return;
	}
	/* Now we try to fetch requests from the request queue */
	while ((req = blk_peek_request(queue))) {
		if (basedev->features & DASD_FEATURE_READONLY &&
		    rq_data_dir(req) == WRITE) {
			DBF_DEV_EVENT(DBF_ERR, basedev,
				      "Rejecting write request %p",
				      req);
			blk_start_request(req);
			__blk_end_request_all(req, -EIO);
			continue;
		}
		cqr = basedev->discipline->build_cp(basedev, block, req);
		if (IS_ERR(cqr)) {
			if (PTR_ERR(cqr) == -EBUSY)
				break;	/* normal end condition */
			if (PTR_ERR(cqr) == -ENOMEM)
				break;	/* terminate request queue loop */
			if (PTR_ERR(cqr) == -EAGAIN) {
				/*
				 * The current request cannot be build right
				 * now, we have to try later. If this request
				 * is the head-of-queue we stop the device
				 * for 1/2 second.
				 */
				if (!list_empty(&block->ccw_queue))
					break;
				spin_lock_irqsave(
					get_ccwdev_lock(basedev->cdev), flags);
				dasd_device_set_stop_bits(basedev,
							  DASD_STOPPED_PENDING);
				spin_unlock_irqrestore(
					get_ccwdev_lock(basedev->cdev), flags);
				/* dasd_block_timeout clears the stop bit */
				dasd_block_set_timer(block, HZ/2);
				break;
			}
			DBF_DEV_EVENT(DBF_ERR, basedev,
				      "CCW creation failed (rc=%ld) "
				      "on request %p",
				      PTR_ERR(cqr), req);
			blk_start_request(req);
			__blk_end_request_all(req, -EIO);
			continue;
		}
		/*
		 * Note: callback is set to dasd_return_cqr_cb in
		 * __dasd_block_start_head to cover erp requests as well
		 */
		cqr->callback_data = (void *) req;
		cqr->status = DASD_CQR_FILLED;
		blk_start_request(req);
		list_add_tail(&cqr->blocklist, &block->ccw_queue);
		dasd_profile_start(block, cqr, req);
	}
}
static void __dasd_cleanup_cqr ( struct dasd_ccw_req * cqr )
{
struct request * req ;
int status ;
2008-01-28 12:29:42 +03:00
int error = 0 ;
2008-01-26 16:11:23 +03:00
req = ( struct request * ) cqr - > callback_data ;
dasd_profile_end ( cqr - > block , cqr , req ) ;
2008-02-05 18:50:47 +03:00
status = cqr - > block - > base - > discipline - > free_cp ( cqr , req ) ;
2008-01-28 12:29:42 +03:00
if ( status < = 0 )
error = status ? status : - EIO ;
2009-04-23 06:05:19 +04:00
__blk_end_request_all ( req , error ) ;
2008-01-26 16:11:23 +03:00
}
/*
 * Process ccw request queue: trigger ERP steps for requests that need
 * them and move requests with final status onto @final_queue.
 */
static void __dasd_process_block_ccw_queue(struct dasd_block *block,
					   struct list_head *final_queue)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;
	dasd_erp_fn_t erp_fn;
	unsigned long flags;
	struct dasd_device *base = block->base;

restart:
	/* Process request with final status. */
	list_for_each_safe(l, n, &block->ccw_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, blocklist);
		/* skip requests that are still owned by the device layer */
		if (cqr->status != DASD_CQR_DONE &&
		    cqr->status != DASD_CQR_FAILED &&
		    cqr->status != DASD_CQR_NEED_ERP &&
		    cqr->status != DASD_CQR_TERMINATED)
			continue;
		if (cqr->status == DASD_CQR_TERMINATED) {
			base->discipline->handle_terminated_request(cqr);
			/* the handler may have modified the list - restart */
			goto restart;
		}
		/* Process requests that may be recovered */
		if (cqr->status == DASD_CQR_NEED_ERP) {
			erp_fn = base->discipline->erp_action(cqr);
			if (IS_ERR(erp_fn(cqr)))
				continue;
			goto restart;
		}
		/* log sense for fatal error */
		if (cqr->status == DASD_CQR_FAILED) {
			dasd_log_sense(cqr, &cqr->irb);
		}
		/* First of all call extended error reporting. */
		if (dasd_eer_enabled(base) &&
		    cqr->status == DASD_CQR_FAILED) {
			dasd_eer_write(base, cqr, DASD_EER_FATALERROR);
			/* restart request: quiesce the device so the EER
			 * consumer can decide how to proceed */
			cqr->status = DASD_CQR_FILLED;
			cqr->retries = 255;
			spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
			dasd_device_set_stop_bits(base, DASD_STOPPED_QUIESCE);
			spin_unlock_irqrestore(get_ccwdev_lock(base->cdev),
					       flags);
			goto restart;
		}
		/* Process finished ERP request. */
		if (cqr->refers) {
			__dasd_process_erp(base, cqr);
			goto restart;
		}
		/* Rechain finished requests to final queue */
		cqr->endclk = get_clock();
		list_move_tail(&cqr->blocklist, final_queue);
	}
}
static void dasd_return_cqr_cb ( struct dasd_ccw_req * cqr , void * data )
{
dasd_schedule_block_bh ( cqr - > block ) ;
}
/*
 * Submit all FILLED requests on the block ccw_queue to the device
 * layer, in queue order.
 */
static void __dasd_block_start_head(struct dasd_block *block)
{
	struct dasd_ccw_req *cqr;

	if (list_empty(&block->ccw_queue))
		return;
	/* We allways begin with the first requests on the queue, as some
	 * of previously started requests have to be enqueued on a
	 * dasd_device again for error recovery.
	 */
	list_for_each_entry(cqr, &block->ccw_queue, blocklist) {
		if (cqr->status != DASD_CQR_FILLED)
			continue;
		/* fail requests not allowed while the lock is stolen */
		if (test_bit(DASD_FLAG_LOCK_STOLEN, &block->base->flags) &&
		    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
			cqr->status = DASD_CQR_FAILED;
			cqr->intrc = -EPERM;
			dasd_schedule_block_bh(block);
			continue;
		}
		/* Non-temporary stop condition will trigger fail fast */
		if (block->base->stopped & ~DASD_STOPPED_PENDING &&
		    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
		    (!dasd_eer_enabled(block->base))) {
			cqr->status = DASD_CQR_FAILED;
			dasd_schedule_block_bh(block);
			continue;
		}
		/* Don't try to start requests if device is stopped */
		if (block->base->stopped)
			return;
		/* just a fail safe check, should not happen */
		if (!cqr->startdev)
			cqr->startdev = block->base;
		/* make sure that the requests we submit find their way back */
		cqr->callback = dasd_return_cqr_cb;
		dasd_add_request_tail(cqr);
	}
}
/*
 * Central dasd_block layer routine. Takes requests from the generic
 * block layer request queue, creates ccw requests, enqueues them on
 * a dasd_device and processes ccw requests that have been returned.
 */
static void dasd_block_tasklet(struct dasd_block *block)
{
	struct list_head final_queue;
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;

	/* allow the tasklet to be scheduled again */
	atomic_set(&block->tasklet_scheduled, 0);
	INIT_LIST_HEAD(&final_queue);
	spin_lock(&block->queue_lock);
	/* Finish off requests on ccw queue */
	__dasd_process_block_ccw_queue(block, &final_queue);
	spin_unlock(&block->queue_lock);
	/* Now call the callback function of requests with final status */
	/* request_queue_lock is taken first, then queue_lock - keep this
	 * lock order consistent */
	spin_lock_irq(&block->request_queue_lock);
	list_for_each_safe(l, n, &final_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, blocklist);
		list_del_init(&cqr->blocklist);
		__dasd_cleanup_cqr(cqr);
	}
	spin_lock(&block->queue_lock);
	/* Get new request from the block device request queue */
	__dasd_process_request_queue(block);
	/* Now check if the head of the ccw queue needs to be started. */
	__dasd_block_start_head(block);
	spin_unlock(&block->queue_lock);
	spin_unlock_irq(&block->request_queue_lock);
	/* balances the dasd_get_device() in dasd_schedule_block_bh() */
	dasd_put_device(block->base);
}
/* Replacement cqr callback used while flushing the block queue: the
 * request itself is cleaned up by dasd_flush_block_queue(), so only
 * wake the waiter. */
static void _dasd_wake_block_flush_cb(struct dasd_ccw_req *cqr, void *data)
{
	wake_up(&dasd_flush_wq);
}
/*
 * Go through all request on the dasd_block request queue, cancel them
 * on the respective dasd_device, and return them to the generic
 * block layer.
 */
static int dasd_flush_block_queue(struct dasd_block *block)
{
	struct dasd_ccw_req *cqr, *n;
	int rc, i;
	struct list_head flush_queue;

	INIT_LIST_HEAD(&flush_queue);
	spin_lock_bh(&block->queue_lock);
	rc = 0;
restart:
	list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) {
		/* if this request currently owned by a dasd_device cancel it */
		if (cqr->status >= DASD_CQR_QUEUED)
			rc = dasd_cancel_req(cqr);
		if (rc < 0)
			break;
		/* Rechain request (including erp chain) so it won't be
		 * touched by the dasd_block_tasklet anymore.
		 * Replace the callback so we notice when the request
		 * is returned from the dasd_device layer.
		 */
		cqr->callback = _dasd_wake_block_flush_cb;
		/* walk down the refers chain (the erp chain) */
		for (i = 0; cqr != NULL; cqr = cqr->refers, i++)
			list_move_tail(&cqr->blocklist, &flush_queue);
		if (i > 1)
			/* moved more than one request - need to restart */
			goto restart;
	}
	spin_unlock_bh(&block->queue_lock);
	/* Now call the callback function of flushed requests */
restart_cb:
	list_for_each_entry_safe(cqr, n, &flush_queue, blocklist) {
		/* wait until the device layer has let go of the request */
		wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED));
		/* Process finished ERP request. */
		if (cqr->refers) {
			spin_lock_bh(&block->queue_lock);
			__dasd_process_erp(block->base, cqr);
			spin_unlock_bh(&block->queue_lock);
			/* restart list_for_xx loop since dasd_process_erp
			 * might remove multiple elements */
			goto restart_cb;
		}
		/* call the callback function */
		spin_lock_irq(&block->request_queue_lock);
		cqr->endclk = get_clock();
		list_del_init(&cqr->blocklist);
		__dasd_cleanup_cqr(cqr);
		spin_unlock_irq(&block->request_queue_lock);
	}
	return rc;
}
/*
2008-01-26 16:11:23 +03:00
* Schedules a call to dasd_tasklet over the device tasklet .
*/
void dasd_schedule_block_bh ( struct dasd_block * block )
{
/* Protect against rescheduling. */
if ( atomic_cmpxchg ( & block - > tasklet_scheduled , 0 , 1 ) ! = 0 )
return ;
/* life cycle of block is bound to it's base device */
dasd_get_device ( block - > base ) ;
tasklet_hi_schedule ( & block - > tasklet ) ;
}
/*
* SECTION : external block device operations
* ( request queue handling , open , release , etc . )
2005-04-17 02:20:36 +04:00
*/
/*
* Dasd request queue function . Called from ll_rw_blk . c
*/
2008-01-26 16:11:23 +03:00
static void do_dasd_request ( struct request_queue * queue )
2005-04-17 02:20:36 +04:00
{
2008-01-26 16:11:23 +03:00
struct dasd_block * block ;
2005-04-17 02:20:36 +04:00
2008-01-26 16:11:23 +03:00
block = queue - > queuedata ;
spin_lock ( & block - > queue_lock ) ;
2005-04-17 02:20:36 +04:00
/* Get new request from the block device request queue */
2008-01-26 16:11:23 +03:00
__dasd_process_request_queue ( block ) ;
2005-04-17 02:20:36 +04:00
/* Now check if the head of the ccw queue needs to be started. */
2008-01-26 16:11:23 +03:00
__dasd_block_start_head ( block ) ;
spin_unlock ( & block - > queue_lock ) ;
2005-04-17 02:20:36 +04:00
}
/*
* Allocate and initialize request queue and default I / O scheduler .
*/
2008-01-26 16:11:23 +03:00
static int dasd_alloc_queue ( struct dasd_block * block )
2005-04-17 02:20:36 +04:00
{
int rc ;
2008-01-26 16:11:23 +03:00
block - > request_queue = blk_init_queue ( do_dasd_request ,
& block - > request_queue_lock ) ;
if ( block - > request_queue = = NULL )
2005-04-17 02:20:36 +04:00
return - ENOMEM ;
2008-01-26 16:11:23 +03:00
block - > request_queue - > queuedata = block ;
2005-04-17 02:20:36 +04:00
2008-01-26 16:11:23 +03:00
elevator_exit ( block - > request_queue - > elevator ) ;
[S390] dasd: fix double elevator_exit call when deadline iosched fails to load
I compiled the kernel without deadline, and the dasd code exits the old
scheduler (CFQ), fails to load the new one (deadline), and then things just
hang - with one of these (sorry about the weird chars - I copy & pasted it
from a 3270 console):
dasd(eckd): 0.0.0151: 3390/0A(CU:3990/01) Cyl:3338 Head:15 Sec:224
------------ cut here ------------
Badness at kernel/mutex.c:134
Modules linked in: dasd_eckd_mod dasd_mod
CPU: 0 Not tainted 2.6.25-rc3 #9
Process exe (pid: 538, task: 000000000d172000, ksp: 000000000d21ef88)
Krnl PSW : 0404000180000000 000000000022fb5c (mutex_lock_nested+0x2a4/0x2cc)
R:0 T:1 IO:0 EX:0 Key:0 M:1 W:0 P:0 AS:0 CC:0 PM:0 EA:3
Krnl GPRS: 0000000000024218 000000000076fc78 0000000000000000 000000000000000f
000000000022f92e 0000000000449898 000000000f921c00 000003e000162590
00000000001539c4 000000000d172000 070000007fffffff 000000000d21f400
000000000f8f2560 00000000002413f8 000000000022fb44 000000000d21f400
Krnl Code: 000000000022fb50: bf2f1000 icm %r2,15,0(%r1)
000000000022fb54: a774fef6 brc 7,22f940
000000000022fb58: a7f40001 brc 15,22fb5a
>000000000022fb5c: a7f4fef2 brc 15,22f940
000000000022fb60: c0e5fffa112a brasl %r14,171db4
000000000022fb66: 1222 ltr %r2,%r2
000000000022fb68: a784fedb brc 8,22f91e
000000000022fb6c: c010002a0086 larl %r1,76fc78
Call Trace:
(<000000000022f92e> mutex_lock_nested+0x76/0x2cc)
<00000000001539c4> elevator_exit+0x38/0x80
<0000000000156ffe> blk_cleanup_queue+0x62/0x7c
<000003e0001d5414> dasd_change_state+0xe0/0x8ec
<000003e0001d5cae> dasd_set_target_state+0x8e/0x9c
<000003e0001d5f74> dasd_generic_set_online+0x160/0x284
<000003e00011e83a> dasd_eckd_set_online+0x2e/0x40
<0000000000199bf4> ccw_device_set_online+0x170/0x2c0
<0000000000199d9e> online_store_recog_and_online+0x5a/0x14c
<000000000019a08a> online_store+0xbe/0x2ec
<000000000018456c> dev_attr_store+0x38/0x58
<000000000010efbc> sysfs_write_file+0x130/0x190
<00000000000af582> vfs_write+0xb2/0x160
<00000000000afc7c> sys_write+0x54/0x9c
<0000000000025e16> sys32_write+0x2e/0x50
<0000000000024218> sysc_noemu+0x10/0x16
<0000000077e82bd2> 0x77e82bd2
Set elevator pointer to NULL in order to avoid double elevator_exit
calls when elevator_init call for deadline iosched fails.
Also make sure the dasd device driver depends on IOSCHED_DEADLINE so
the default IO scheduler of the dasd driver is present.
Signed-off-by: Josef 'Jeff' Sipek <jeffpc@josefsipek.net>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
2008-04-17 09:45:56 +04:00
block - > request_queue - > elevator = NULL ;
2008-01-26 16:11:23 +03:00
rc = elevator_init ( block - > request_queue , " deadline " ) ;
2005-04-17 02:20:36 +04:00
if ( rc ) {
2008-01-26 16:11:23 +03:00
blk_cleanup_queue ( block - > request_queue ) ;
2005-04-17 02:20:36 +04:00
return rc ;
}
return 0 ;
}
/*
* Allocate and initialize request queue .
*/
2008-01-26 16:11:23 +03:00
static void dasd_setup_queue ( struct dasd_block * block )
2005-04-17 02:20:36 +04:00
{
int max ;
2011-01-05 14:48:06 +03:00
if ( block - > base - > features & DASD_FEATURE_USERAW ) {
/*
* the max_blocks value for raw_track access is 256
* it is higher than the native ECKD value because we
* only need one ccw per track
* so the max_hw_sectors are
* 2048 x 512 B = 1024 kB = 16 tracks
*/
max = 2048 ;
} else {
max = block - > base - > discipline - > max_blocks < < block - > s2b_shift ;
}
blk_queue_logical_block_size ( block - > request_queue ,
block - > bp_block ) ;
2010-02-26 08:20:38 +03:00
blk_queue_max_hw_sectors ( block - > request_queue , max ) ;
2010-02-26 08:20:39 +03:00
blk_queue_max_segments ( block - > request_queue , - 1L ) ;
2009-03-26 17:23:48 +03:00
/* with page sized segments we can translate each segement into
* one idaw / tidaw
*/
blk_queue_max_segment_size ( block - > request_queue , PAGE_SIZE ) ;
blk_queue_segment_boundary ( block - > request_queue , PAGE_SIZE - 1 ) ;
2005-04-17 02:20:36 +04:00
}
/*
* Deactivate and free request queue .
*/
2008-01-26 16:11:23 +03:00
static void dasd_free_queue ( struct dasd_block * block )
2005-04-17 02:20:36 +04:00
{
2008-01-26 16:11:23 +03:00
if ( block - > request_queue ) {
blk_cleanup_queue ( block - > request_queue ) ;
block - > request_queue = NULL ;
2005-04-17 02:20:36 +04:00
}
}
/*
* Flush request on the request queue .
*/
2008-01-26 16:11:23 +03:00
static void dasd_flush_request_queue ( struct dasd_block * block )
2005-04-17 02:20:36 +04:00
{
struct request * req ;
2008-01-26 16:11:23 +03:00
if ( ! block - > request_queue )
2005-04-17 02:20:36 +04:00
return ;
2006-06-29 16:58:12 +04:00
2008-01-26 16:11:23 +03:00
spin_lock_irq ( & block - > request_queue_lock ) ;
2009-05-08 06:54:16 +04:00
while ( ( req = blk_fetch_request ( block - > request_queue ) ) )
2009-04-23 06:05:19 +04:00
__blk_end_request_all ( req , - EIO ) ;
2008-01-26 16:11:23 +03:00
spin_unlock_irq ( & block - > request_queue_lock ) ;
2005-04-17 02:20:36 +04:00
}
2008-03-02 18:36:08 +03:00
static int dasd_open ( struct block_device * bdev , fmode_t mode )
2005-04-17 02:20:36 +04:00
{
2010-02-27 00:37:46 +03:00
struct dasd_device * base ;
2005-04-17 02:20:36 +04:00
int rc ;
2011-04-20 12:15:30 +04:00
base = dasd_device_from_gendisk ( bdev - > bd_disk ) ;
if ( ! base )
2010-02-27 00:37:46 +03:00
return - ENODEV ;
2011-04-20 12:15:30 +04:00
atomic_inc ( & base - > block - > open_count ) ;
2008-01-26 16:11:23 +03:00
if ( test_bit ( DASD_FLAG_OFFLINE , & base - > flags ) ) {
2005-04-17 02:20:36 +04:00
rc = - ENODEV ;
goto unlock ;
}
2008-01-26 16:11:23 +03:00
if ( ! try_module_get ( base - > discipline - > owner ) ) {
2005-04-17 02:20:36 +04:00
rc = - EINVAL ;
goto unlock ;
}
if ( dasd_probeonly ) {
2009-03-26 17:23:49 +03:00
dev_info ( & base - > cdev - > dev ,
" Accessing the DASD failed because it is in "
" probeonly mode \n " ) ;
2005-04-17 02:20:36 +04:00
rc = - EPERM ;
goto out ;
}
2008-01-26 16:11:23 +03:00
if ( base - > state < = DASD_STATE_BASIC ) {
DBF_DEV_EVENT ( DBF_ERR , base , " %s " ,
2005-04-17 02:20:36 +04:00
" Cannot open unrecognized device " ) ;
rc = - ENODEV ;
goto out ;
}
2010-03-08 14:26:24 +03:00
if ( ( mode & FMODE_WRITE ) & &
( test_bit ( DASD_FLAG_DEVICE_RO , & base - > flags ) | |
( base - > features & DASD_FEATURE_READONLY ) ) ) {
rc = - EROFS ;
goto out ;
}
2011-04-20 12:15:30 +04:00
dasd_put_device ( base ) ;
2005-04-17 02:20:36 +04:00
return 0 ;
out :
2008-01-26 16:11:23 +03:00
module_put ( base - > discipline - > owner ) ;
2005-04-17 02:20:36 +04:00
unlock :
2011-04-20 12:15:30 +04:00
atomic_dec ( & base - > block - > open_count ) ;
dasd_put_device ( base ) ;
2005-04-17 02:20:36 +04:00
return rc ;
}
2008-03-02 18:36:08 +03:00
static int dasd_release ( struct gendisk * disk , fmode_t mode )
2005-04-17 02:20:36 +04:00
{
2011-04-20 12:15:30 +04:00
struct dasd_device * base ;
2005-04-17 02:20:36 +04:00
2011-04-20 12:15:30 +04:00
base = dasd_device_from_gendisk ( disk ) ;
if ( ! base )
return - ENODEV ;
atomic_dec ( & base - > block - > open_count ) ;
module_put ( base - > discipline - > owner ) ;
dasd_put_device ( base ) ;
2005-04-17 02:20:36 +04:00
return 0 ;
}
2006-01-08 12:02:50 +03:00
/*
* Return disk geometry .
*/
2008-01-26 16:11:23 +03:00
static int dasd_getgeo ( struct block_device * bdev , struct hd_geometry * geo )
2006-01-08 12:02:50 +03:00
{
2008-01-26 16:11:23 +03:00
struct dasd_device * base ;
2006-01-08 12:02:50 +03:00
2011-04-20 12:15:30 +04:00
base = dasd_device_from_gendisk ( bdev - > bd_disk ) ;
if ( ! base )
2006-01-08 12:02:50 +03:00
return - ENODEV ;
2008-01-26 16:11:23 +03:00
if ( ! base - > discipline | |
2011-04-20 12:15:30 +04:00
! base - > discipline - > fill_geometry ) {
dasd_put_device ( base ) ;
2006-01-08 12:02:50 +03:00
return - EINVAL ;
2011-04-20 12:15:30 +04:00
}
base - > discipline - > fill_geometry ( base - > block , geo ) ;
geo - > start = get_start_sect ( bdev ) > > base - > block - > s2b_shift ;
dasd_put_device ( base ) ;
2006-01-08 12:02:50 +03:00
return 0 ;
}
2009-09-22 04:01:13 +04:00
const struct block_device_operations
2005-04-17 02:20:36 +04:00
dasd_device_operations = {
. owner = THIS_MODULE ,
2008-03-02 18:36:08 +03:00
. open = dasd_open ,
. release = dasd_release ,
2009-03-26 17:23:45 +03:00
. ioctl = dasd_ioctl ,
. compat_ioctl = dasd_ioctl ,
2006-01-08 12:02:50 +03:00
. getgeo = dasd_getgeo ,
2005-04-17 02:20:36 +04:00
} ;
2008-01-26 16:11:23 +03:00
/*******************************************************************************
* end of block device operations
*/
2005-04-17 02:20:36 +04:00
static void
dasd_exit ( void )
{
# ifdef CONFIG_PROC_FS
dasd_proc_exit ( ) ;
# endif
2006-03-24 14:15:25 +03:00
dasd_eer_exit ( ) ;
2005-07-27 22:45:03 +04:00
if ( dasd_page_cache ! = NULL ) {
kmem_cache_destroy ( dasd_page_cache ) ;
dasd_page_cache = NULL ;
}
2005-04-17 02:20:36 +04:00
dasd_gendisk_exit ( ) ;
dasd_devmap_exit ( ) ;
if ( dasd_debug_area ! = NULL ) {
debug_unregister ( dasd_debug_area ) ;
dasd_debug_area = NULL ;
}
2011-07-24 12:48:32 +04:00
dasd_statistics_removeroot ( ) ;
2005-04-17 02:20:36 +04:00
}
/*
* SECTION : common functions for ccw_driver use
*/
2010-03-08 14:26:24 +03:00
/*
* Is the device read - only ?
* Note that this function does not report the setting of the
* readonly device attribute , but how it is configured in z / VM .
*/
int dasd_device_is_ro ( struct dasd_device * device )
{
struct ccw_dev_id dev_id ;
struct diag210 diag_data ;
int rc ;
if ( ! MACHINE_IS_VM )
return 0 ;
ccw_device_get_id ( device - > cdev , & dev_id ) ;
memset ( & diag_data , 0 , sizeof ( diag_data ) ) ;
diag_data . vrdcdvno = dev_id . devno ;
diag_data . vrdclen = sizeof ( diag_data ) ;
rc = diag210 ( & diag_data ) ;
if ( rc = = 0 | | rc = = 2 ) {
return diag_data . vrdcvfla & 0x80 ;
} else {
DBF_EVENT ( DBF_WARNING , " diag210 failed for dev=%04x with rc=%d " ,
dev_id . devno , rc ) ;
return 0 ;
}
}
EXPORT_SYMBOL_GPL ( dasd_device_is_ro ) ;
2009-04-14 17:36:23 +04:00
static void dasd_generic_auto_online ( void * data , async_cookie_t cookie )
{
struct ccw_device * cdev = data ;
int ret ;
ret = ccw_device_set_online ( cdev ) ;
if ( ret )
pr_warning ( " %s: Setting the DASD online failed with rc=%d \n " ,
dev_name ( & cdev - > dev ) , ret ) ;
}
2006-01-06 11:19:15 +03:00
/*
* Initial attempt at a probe function . this can be simplified once
* the other detection code is gone .
*/
2008-01-26 16:11:23 +03:00
int dasd_generic_probe ( struct ccw_device * cdev ,
struct dasd_discipline * discipline )
2005-04-17 02:20:36 +04:00
{
int ret ;
ret = dasd_add_sysfs_files ( cdev ) ;
if ( ret ) {
2009-12-07 14:51:52 +03:00
DBF_EVENT_DEVID ( DBF_WARNING , cdev , " %s " ,
" dasd_generic_probe: could not add "
" sysfs entries " ) ;
2006-06-29 17:08:18 +04:00
return ret ;
2005-04-17 02:20:36 +04:00
}
2006-06-29 17:08:18 +04:00
cdev - > handler = & dasd_int_handler ;
2005-04-17 02:20:36 +04:00
2006-06-29 17:08:18 +04:00
/*
* Automatically online either all dasd devices ( dasd_autodetect )
* or all devices specified with dasd = parameters during
* initial probe .
*/
if ( ( dasd_get_feature ( cdev , DASD_FEATURE_INITIAL_ONLINE ) > 0 ) | |
2008-10-10 23:33:09 +04:00
( dasd_autodetect & & dasd_busid_known ( dev_name ( & cdev - > dev ) ) ! = 0 ) )
2009-04-14 17:36:23 +04:00
async_schedule ( dasd_generic_auto_online , cdev ) ;
2008-01-26 16:11:08 +03:00
return 0 ;
2005-04-17 02:20:36 +04:00
}
2006-01-06 11:19:15 +03:00
/*
* This will one day be called from a global not_oper handler .
* It is also used by driver_unregister during module unload .
*/
2008-01-26 16:11:23 +03:00
void dasd_generic_remove ( struct ccw_device * cdev )
2005-04-17 02:20:36 +04:00
{
struct dasd_device * device ;
2008-01-26 16:11:23 +03:00
struct dasd_block * block ;
2005-04-17 02:20:36 +04:00
2005-05-17 08:53:39 +04:00
cdev - > handler = NULL ;
2005-04-17 02:20:36 +04:00
dasd_remove_sysfs_files ( cdev ) ;
device = dasd_device_from_cdev ( cdev ) ;
if ( IS_ERR ( device ) )
return ;
if ( test_and_set_bit ( DASD_FLAG_OFFLINE , & device - > flags ) ) {
/* Already doing offline processing */
dasd_put_device ( device ) ;
return ;
}
/*
* This device is removed unconditionally . Set offline
* flag to prevent dasd_open from opening it while it is
* no quite down yet .
*/
dasd_set_target_state ( device , DASD_STATE_NEW ) ;
/* dasd_delete_device destroys the device reference. */
2008-01-26 16:11:23 +03:00
block = device - > block ;
2005-04-17 02:20:36 +04:00
dasd_delete_device ( device ) ;
2008-01-26 16:11:23 +03:00
/*
* life cycle of block is bound to device , so delete it after
* device was safely removed
*/
if ( block )
dasd_free_block ( block ) ;
2005-04-17 02:20:36 +04:00
}
2006-01-06 11:19:15 +03:00
/*
* Activate a device . This is called from dasd_ { eckd , fba } _probe ( ) when either
2005-04-17 02:20:36 +04:00
* the device is detected for the first time and is supposed to be used
2006-01-06 11:19:15 +03:00
* or the user has started activation through sysfs .
*/
2008-01-26 16:11:23 +03:00
int dasd_generic_set_online ( struct ccw_device * cdev ,
struct dasd_discipline * base_discipline )
2005-04-17 02:20:36 +04:00
{
2006-02-21 05:28:13 +03:00
struct dasd_discipline * discipline ;
2005-04-17 02:20:36 +04:00
struct dasd_device * device ;
2005-09-04 02:57:58 +04:00
int rc ;
2005-05-01 19:58:59 +04:00
2006-06-29 17:08:18 +04:00
/* first online clears initial online feature flag */
dasd_set_feature ( cdev , DASD_FEATURE_INITIAL_ONLINE , 0 ) ;
2005-04-17 02:20:36 +04:00
device = dasd_create_device ( cdev ) ;
if ( IS_ERR ( device ) )
return PTR_ERR ( device ) ;
2006-02-21 05:28:13 +03:00
discipline = base_discipline ;
2005-09-04 02:57:58 +04:00
if ( device - > features & DASD_FEATURE_USEDIAG ) {
2005-04-17 02:20:36 +04:00
if ( ! dasd_diag_discipline_pointer ) {
2009-03-26 17:23:49 +03:00
pr_warning ( " %s Setting the DASD online failed because "
" of missing DIAG discipline \n " ,
dev_name ( & cdev - > dev ) ) ;
2005-04-17 02:20:36 +04:00
dasd_delete_device ( device ) ;
return - ENODEV ;
}
discipline = dasd_diag_discipline_pointer ;
}
2006-02-21 05:28:13 +03:00
if ( ! try_module_get ( base_discipline - > owner ) ) {
dasd_delete_device ( device ) ;
return - EINVAL ;
}
if ( ! try_module_get ( discipline - > owner ) ) {
module_put ( base_discipline - > owner ) ;
dasd_delete_device ( device ) ;
return - EINVAL ;
}
device - > base_discipline = base_discipline ;
2005-04-17 02:20:36 +04:00
device - > discipline = discipline ;
2008-01-26 16:11:23 +03:00
/* check_device will allocate block device if necessary */
2005-04-17 02:20:36 +04:00
rc = discipline - > check_device ( device ) ;
if ( rc ) {
2009-03-26 17:23:49 +03:00
pr_warning ( " %s Setting the DASD online with discipline %s "
" failed with rc=%i \n " ,
dev_name ( & cdev - > dev ) , discipline - > name , rc ) ;
2006-02-21 05:28:13 +03:00
module_put ( discipline - > owner ) ;
module_put ( base_discipline - > owner ) ;
2005-04-17 02:20:36 +04:00
dasd_delete_device ( device ) ;
return rc ;
}
dasd_set_target_state ( device , DASD_STATE_ONLINE ) ;
if ( device - > state < = DASD_STATE_KNOWN ) {
2009-03-26 17:23:49 +03:00
pr_warning ( " %s Setting the DASD online failed because of a "
" missing discipline \n " , dev_name ( & cdev - > dev ) ) ;
2005-04-17 02:20:36 +04:00
rc = - ENODEV ;
dasd_set_target_state ( device , DASD_STATE_NEW ) ;
2008-01-26 16:11:23 +03:00
if ( device - > block )
dasd_free_block ( device - > block ) ;
2005-04-17 02:20:36 +04:00
dasd_delete_device ( device ) ;
} else
pr_debug ( " dasd_generic device %s found \n " ,
2008-10-10 23:33:09 +04:00
dev_name ( & cdev - > dev ) ) ;
2010-02-27 00:37:47 +03:00
wait_event ( dasd_init_waitq , _wait_for_device ( device ) ) ;
2005-04-17 02:20:36 +04:00
dasd_put_device ( device ) ;
return rc ;
}
2008-01-26 16:11:23 +03:00
int dasd_generic_set_offline ( struct ccw_device * cdev )
2005-04-17 02:20:36 +04:00
{
struct dasd_device * device ;
2008-01-26 16:11:23 +03:00
struct dasd_block * block ;
2006-04-11 09:53:47 +04:00
int max_count , open_count ;
2005-04-17 02:20:36 +04:00
device = dasd_device_from_cdev ( cdev ) ;
if ( IS_ERR ( device ) )
return PTR_ERR ( device ) ;
if ( test_and_set_bit ( DASD_FLAG_OFFLINE , & device - > flags ) ) {
/* Already doing offline processing */
dasd_put_device ( device ) ;
return 0 ;
}
/*
* We must make sure that this device is currently not in use .
* The open_count is increased for every opener , that includes
* the blkdev_get in dasd_scan_partitions . We are only interested
* in the other openers .
*/
2008-01-26 16:11:23 +03:00
if ( device - > block ) {
2008-04-17 09:46:26 +04:00
max_count = device - > block - > bdev ? 0 : - 1 ;
open_count = atomic_read ( & device - > block - > open_count ) ;
2008-01-26 16:11:23 +03:00
if ( open_count > max_count ) {
if ( open_count > 0 )
2009-03-26 17:23:49 +03:00
pr_warning ( " %s: The DASD cannot be set offline "
" with open count %i \n " ,
dev_name ( & cdev - > dev ) , open_count ) ;
2008-01-26 16:11:23 +03:00
else
2009-03-26 17:23:49 +03:00
pr_warning ( " %s: The DASD cannot be set offline "
" while it is in use \n " ,
dev_name ( & cdev - > dev ) ) ;
2008-01-26 16:11:23 +03:00
clear_bit ( DASD_FLAG_OFFLINE , & device - > flags ) ;
dasd_put_device ( device ) ;
return - EBUSY ;
}
2005-04-17 02:20:36 +04:00
}
dasd_set_target_state ( device , DASD_STATE_NEW ) ;
/* dasd_delete_device destroys the device reference. */
2008-01-26 16:11:23 +03:00
block = device - > block ;
2005-04-17 02:20:36 +04:00
dasd_delete_device ( device ) ;
2008-01-26 16:11:23 +03:00
/*
* life cycle of block is bound to device , so delete it after
* device was safely removed
*/
if ( block )
dasd_free_block ( block ) ;
2005-04-17 02:20:36 +04:00
return 0 ;
}
2011-01-05 14:48:03 +03:00
int dasd_generic_last_path_gone ( struct dasd_device * device )
{
struct dasd_ccw_req * cqr ;
dev_warn ( & device - > cdev - > dev , " No operational channel path is left "
" for the device \n " ) ;
DBF_DEV_EVENT ( DBF_WARNING , device , " %s " , " last path gone " ) ;
/* First of all call extended error reporting. */
dasd_eer_write ( device , NULL , DASD_EER_NOPATH ) ;
if ( device - > state < DASD_STATE_BASIC )
return 0 ;
/* Device is active. We want to keep it. */
list_for_each_entry ( cqr , & device - > ccw_queue , devlist )
if ( ( cqr - > status = = DASD_CQR_IN_IO ) | |
( cqr - > status = = DASD_CQR_CLEAR_PENDING ) ) {
cqr - > status = DASD_CQR_QUEUED ;
cqr - > retries + + ;
}
dasd_device_set_stop_bits ( device , DASD_STOPPED_DC_WAIT ) ;
dasd_device_clear_timer ( device ) ;
dasd_schedule_device_bh ( device ) ;
return 1 ;
}
EXPORT_SYMBOL_GPL ( dasd_generic_last_path_gone ) ;
int dasd_generic_path_operational ( struct dasd_device * device )
{
dev_info ( & device - > cdev - > dev , " A channel path to the device has become "
" operational \n " ) ;
DBF_DEV_EVENT ( DBF_WARNING , device , " %s " , " path operational " ) ;
dasd_device_remove_stop_bits ( device , DASD_STOPPED_DC_WAIT ) ;
if ( device - > stopped & DASD_UNRESUMED_PM ) {
dasd_device_remove_stop_bits ( device , DASD_UNRESUMED_PM ) ;
dasd_restore_device ( device ) ;
return 1 ;
}
dasd_schedule_device_bh ( device ) ;
if ( device - > block )
dasd_schedule_block_bh ( device - > block ) ;
return 1 ;
}
EXPORT_SYMBOL_GPL ( dasd_generic_path_operational ) ;
2008-01-26 16:11:23 +03:00
int dasd_generic_notify ( struct ccw_device * cdev , int event )
2005-04-17 02:20:36 +04:00
{
struct dasd_device * device ;
int ret ;
2008-08-21 21:46:39 +04:00
device = dasd_device_from_cdev_locked ( cdev ) ;
2005-04-17 02:20:36 +04:00
if ( IS_ERR ( device ) )
return 0 ;
ret = 0 ;
switch ( event ) {
case CIO_GONE :
2009-03-31 21:16:05 +04:00
case CIO_BOXED :
2005-04-17 02:20:36 +04:00
case CIO_NO_PATH :
2011-01-05 14:48:03 +03:00
device - > path_data . opm = 0 ;
device - > path_data . ppm = 0 ;
device - > path_data . npm = 0 ;
ret = dasd_generic_last_path_gone ( device ) ;
2005-04-17 02:20:36 +04:00
break ;
case CIO_OPER :
ret = 1 ;
2011-01-05 14:48:03 +03:00
if ( device - > path_data . opm )
ret = dasd_generic_path_operational ( device ) ;
2005-04-17 02:20:36 +04:00
break ;
}
dasd_put_device ( device ) ;
return ret ;
}
2011-01-05 14:48:03 +03:00
void dasd_generic_path_event ( struct ccw_device * cdev , int * path_event )
{
int chp ;
__u8 oldopm , eventlpm ;
struct dasd_device * device ;
device = dasd_device_from_cdev_locked ( cdev ) ;
if ( IS_ERR ( device ) )
return ;
for ( chp = 0 ; chp < 8 ; chp + + ) {
eventlpm = 0x80 > > chp ;
if ( path_event [ chp ] & PE_PATH_GONE ) {
oldopm = device - > path_data . opm ;
device - > path_data . opm & = ~ eventlpm ;
device - > path_data . ppm & = ~ eventlpm ;
device - > path_data . npm & = ~ eventlpm ;
if ( oldopm & & ! device - > path_data . opm )
dasd_generic_last_path_gone ( device ) ;
}
if ( path_event [ chp ] & PE_PATH_AVAILABLE ) {
device - > path_data . opm & = ~ eventlpm ;
device - > path_data . ppm & = ~ eventlpm ;
device - > path_data . npm & = ~ eventlpm ;
device - > path_data . tbvpm | = eventlpm ;
dasd_schedule_device_bh ( device ) ;
}
2012-01-18 21:03:41 +04:00
if ( path_event [ chp ] & PE_PATHGROUP_ESTABLISHED ) {
DBF_DEV_EVENT ( DBF_WARNING , device , " %s " ,
" Pathgroup re-established \n " ) ;
if ( device - > discipline - > kick_validate )
device - > discipline - > kick_validate ( device ) ;
}
2011-01-05 14:48:03 +03:00
}
dasd_put_device ( device ) ;
}
EXPORT_SYMBOL_GPL ( dasd_generic_path_event ) ;
/*
 * Merge verified paths into the operational path mask. If the device
 * had no operational path before, report it operational again.
 */
int dasd_generic_verify_path(struct dasd_device *device, __u8 lpm)
{
	if (!device->path_data.opm && lpm) {
		device->path_data.opm = lpm;
		dasd_generic_path_operational(device);
	} else
		device->path_data.opm |= lpm;
	return 0;
}
EXPORT_SYMBOL_GPL(dasd_generic_verify_path);
2009-06-16 12:30:25 +04:00
int dasd_generic_pm_freeze ( struct ccw_device * cdev )
{
struct dasd_ccw_req * cqr , * n ;
int rc ;
struct list_head freeze_queue ;
struct dasd_device * device = dasd_device_from_cdev ( cdev ) ;
if ( IS_ERR ( device ) )
return PTR_ERR ( device ) ;
2011-01-05 14:48:05 +03:00
2011-10-30 18:17:09 +04:00
/* mark device as suspended */
set_bit ( DASD_FLAG_SUSPENDED , & device - > flags ) ;
2011-01-05 14:48:05 +03:00
if ( device - > discipline - > freeze )
rc = device - > discipline - > freeze ( device ) ;
2009-06-16 12:30:25 +04:00
/* disallow new I/O */
2009-12-07 14:51:51 +03:00
dasd_device_set_stop_bits ( device , DASD_STOPPED_PM ) ;
2009-06-16 12:30:25 +04:00
/* clear active requests */
INIT_LIST_HEAD ( & freeze_queue ) ;
spin_lock_irq ( get_ccwdev_lock ( cdev ) ) ;
rc = 0 ;
list_for_each_entry_safe ( cqr , n , & device - > ccw_queue , devlist ) {
/* Check status and move request to flush_queue */
if ( cqr - > status = = DASD_CQR_IN_IO ) {
rc = device - > discipline - > term_IO ( cqr ) ;
if ( rc ) {
/* unable to terminate requeust */
dev_err ( & device - > cdev - > dev ,
" Unable to terminate request %p "
" on suspend \n " , cqr ) ;
spin_unlock_irq ( get_ccwdev_lock ( cdev ) ) ;
dasd_put_device ( device ) ;
return rc ;
}
}
list_move_tail ( & cqr - > devlist , & freeze_queue ) ;
}
spin_unlock_irq ( get_ccwdev_lock ( cdev ) ) ;
list_for_each_entry_safe ( cqr , n , & freeze_queue , devlist ) {
wait_event ( dasd_flush_wq ,
( cqr - > status ! = DASD_CQR_CLEAR_PENDING ) ) ;
if ( cqr - > status = = DASD_CQR_CLEARED )
cqr - > status = DASD_CQR_QUEUED ;
}
/* move freeze_queue to start of the ccw_queue */
spin_lock_irq ( get_ccwdev_lock ( cdev ) ) ;
list_splice_tail ( & freeze_queue , & device - > ccw_queue ) ;
spin_unlock_irq ( get_ccwdev_lock ( cdev ) ) ;
dasd_put_device ( device ) ;
return rc ;
}
EXPORT_SYMBOL_GPL ( dasd_generic_pm_freeze ) ;
int dasd_generic_restore_device ( struct ccw_device * cdev )
{
struct dasd_device * device = dasd_device_from_cdev ( cdev ) ;
int rc = 0 ;
if ( IS_ERR ( device ) )
return PTR_ERR ( device ) ;
2009-06-22 14:08:17 +04:00
/* allow new IO again */
2009-12-07 14:51:51 +03:00
dasd_device_remove_stop_bits ( device ,
( DASD_STOPPED_PM | DASD_UNRESUMED_PM ) ) ;
2009-06-22 14:08:17 +04:00
2009-06-16 12:30:25 +04:00
dasd_schedule_device_bh ( device ) ;
2009-12-07 14:51:51 +03:00
/*
* call discipline restore function
* if device is stopped do nothing e . g . for disconnected devices
*/
if ( device - > discipline - > restore & & ! ( device - > stopped ) )
2009-06-16 12:30:25 +04:00
rc = device - > discipline - > restore ( device ) ;
2009-12-07 14:51:51 +03:00
if ( rc | | device - > stopped )
2009-06-22 14:08:17 +04:00
/*
* if the resume failed for the DASD we put it in
* an UNRESUMED stop state
*/
device - > stopped | = DASD_UNRESUMED_PM ;
2009-06-16 12:30:25 +04:00
2009-10-06 12:34:15 +04:00
if ( device - > block )
dasd_schedule_block_bh ( device - > block ) ;
2011-10-30 18:17:09 +04:00
clear_bit ( DASD_FLAG_SUSPENDED , & device - > flags ) ;
2009-06-16 12:30:25 +04:00
dasd_put_device ( device ) ;
2009-06-22 14:08:17 +04:00
return 0 ;
2009-06-16 12:30:25 +04:00
}
EXPORT_SYMBOL_GPL ( dasd_generic_restore_device ) ;
2007-05-10 17:45:46 +04:00
static struct dasd_ccw_req * dasd_generic_build_rdc ( struct dasd_device * device ,
void * rdc_buffer ,
int rdc_buffer_size ,
2009-09-11 12:28:29 +04:00
int magic )
2007-05-04 20:47:51 +04:00
{
struct dasd_ccw_req * cqr ;
struct ccw1 * ccw ;
2009-10-14 14:43:48 +04:00
unsigned long * idaw ;
2007-05-04 20:47:51 +04:00
cqr = dasd_smalloc_request ( magic , 1 /* RDC */ , rdc_buffer_size , device ) ;
if ( IS_ERR ( cqr ) ) {
2009-03-26 17:23:49 +03:00
/* internal error 13 - Allocating the RDC request failed*/
dev_err ( & device - > cdev - > dev ,
" An error occurred in the DASD device driver, "
" reason=%s \n " , " 13 " ) ;
2007-05-04 20:47:51 +04:00
return cqr ;
}
ccw = cqr - > cpaddr ;
ccw - > cmd_code = CCW_CMD_RDC ;
2009-10-14 14:43:48 +04:00
if ( idal_is_needed ( rdc_buffer , rdc_buffer_size ) ) {
idaw = ( unsigned long * ) ( cqr - > data ) ;
ccw - > cda = ( __u32 ) ( addr_t ) idaw ;
ccw - > flags = CCW_FLAG_IDA ;
idaw = idal_create_words ( idaw , rdc_buffer , rdc_buffer_size ) ;
} else {
ccw - > cda = ( __u32 ) ( addr_t ) rdc_buffer ;
ccw - > flags = 0 ;
}
2007-05-04 20:47:51 +04:00
2009-10-14 14:43:48 +04:00
ccw - > count = rdc_buffer_size ;
2008-01-26 16:11:23 +03:00
cqr - > startdev = device ;
cqr - > memdev = device ;
2007-05-04 20:47:51 +04:00
cqr - > expires = 10 * HZ ;
2009-12-07 14:51:51 +03:00
cqr - > retries = 256 ;
2007-05-04 20:47:51 +04:00
cqr - > buildclk = get_clock ( ) ;
cqr - > status = DASD_CQR_FILLED ;
return cqr ;
}
2009-09-11 12:28:29 +04:00
int dasd_generic_read_dev_chars ( struct dasd_device * device , int magic ,
2009-06-12 12:26:37 +04:00
void * rdc_buffer , int rdc_buffer_size )
2007-05-04 20:47:51 +04:00
{
int ret ;
struct dasd_ccw_req * cqr ;
2009-06-12 12:26:37 +04:00
cqr = dasd_generic_build_rdc ( device , rdc_buffer , rdc_buffer_size ,
2007-05-04 20:47:51 +04:00
magic ) ;
if ( IS_ERR ( cqr ) )
return PTR_ERR ( cqr ) ;
ret = dasd_sleep_on ( cqr ) ;
2008-01-26 16:11:23 +03:00
dasd_sfree_request ( cqr , cqr - > memdev ) ;
2007-05-04 20:47:51 +04:00
return ret ;
}
2007-05-10 17:45:45 +04:00
EXPORT_SYMBOL_GPL ( dasd_generic_read_dev_chars ) ;
2006-03-24 14:15:25 +03:00
2009-03-26 17:23:48 +03:00
/*
* In command mode and transport mode we need to look for sense
* data in different places . The sense data itself is allways
* an array of 32 bytes , so we can unify the sense data access
* for both modes .
*/
char * dasd_get_sense ( struct irb * irb )
{
struct tsb * tsb = NULL ;
char * sense = NULL ;
if ( scsw_is_tm ( & irb - > scsw ) & & ( irb - > scsw . tm . fcxs = = 0x01 ) ) {
if ( irb - > scsw . tm . tcw )
tsb = tcw_get_tsb ( ( struct tcw * ) ( unsigned long )
irb - > scsw . tm . tcw ) ;
if ( tsb & & tsb - > length = = 64 & & tsb - > flags )
switch ( tsb - > flags & 0x07 ) {
case 1 : /* tsa_iostat */
sense = tsb - > tsa . iostat . sense ;
break ;
case 2 : /* tsa_ddpc */
sense = tsb - > tsa . ddpc . sense ;
break ;
default :
/* currently we don't use interrogate data */
break ;
}
} else if ( irb - > esw . esw0 . erw . cons ) {
sense = irb - > ecw ;
}
return sense ;
}
EXPORT_SYMBOL_GPL ( dasd_get_sense ) ;
2008-01-26 16:11:23 +03:00
/*
 * Module initialization: set up the wait queues, the s390 debug
 * facility, the debugfs statistics root, the device map, the gendisk
 * layer, command-line parsing, extended error reporting and (when
 * configured) the proc interface. On any failure everything set up
 * so far is torn down again via dasd_exit().
 */
static int __init dasd_init(void)
{
	int rc;

	init_waitqueue_head(&dasd_init_waitq);
	init_waitqueue_head(&dasd_flush_wq);
	init_waitqueue_head(&generic_waitq);

	/* register 'common' DASD debug area, used for all DBF_XXX calls */
	dasd_debug_area = debug_register("dasd", 1, 1, 8 * sizeof(long));
	if (dasd_debug_area == NULL) {
		rc = -ENOMEM;
		goto failed;
	}
	debug_register_view(dasd_debug_area, &debug_sprintf_view);
	debug_set_level(dasd_debug_area, DBF_WARNING);

	DBF_EVENT(DBF_EMERG, "%s", "debug area created");

	dasd_diag_discipline_pointer = NULL;

	dasd_statistics_createroot();

	rc = dasd_devmap_init();
	if (rc)
		goto failed;
	rc = dasd_gendisk_init();
	if (rc)
		goto failed;
	rc = dasd_parse();
	if (rc)
		goto failed;
	rc = dasd_eer_init();
	if (rc)
		goto failed;
#ifdef CONFIG_PROC_FS
	rc = dasd_proc_init();
	if (rc)
		goto failed;
#endif
	return 0;
failed:
	pr_info("The DASD device driver could not be initialized\n");
	dasd_exit();
	return rc;
}
module_init(dasd_init);
module_exit(dasd_exit);

/* Symbols exported for the discipline drivers (ECKD, FBA, DIAG). */
EXPORT_SYMBOL(dasd_debug_area);
EXPORT_SYMBOL(dasd_diag_discipline_pointer);
EXPORT_SYMBOL(dasd_add_request_head);
EXPORT_SYMBOL(dasd_add_request_tail);
EXPORT_SYMBOL(dasd_cancel_req);
EXPORT_SYMBOL(dasd_device_clear_timer);
EXPORT_SYMBOL(dasd_block_clear_timer);
EXPORT_SYMBOL(dasd_enable_device);
EXPORT_SYMBOL(dasd_int_handler);
EXPORT_SYMBOL(dasd_kfree_request);
EXPORT_SYMBOL(dasd_kick_device);
EXPORT_SYMBOL(dasd_kmalloc_request);
EXPORT_SYMBOL(dasd_schedule_device_bh);
EXPORT_SYMBOL(dasd_schedule_block_bh);
EXPORT_SYMBOL(dasd_set_target_state);
EXPORT_SYMBOL(dasd_device_set_timer);
EXPORT_SYMBOL(dasd_block_set_timer);
EXPORT_SYMBOL(dasd_sfree_request);
EXPORT_SYMBOL(dasd_sleep_on);
EXPORT_SYMBOL(dasd_sleep_on_immediatly);
EXPORT_SYMBOL(dasd_sleep_on_interruptible);
EXPORT_SYMBOL(dasd_smalloc_request);
EXPORT_SYMBOL(dasd_start_IO);
EXPORT_SYMBOL(dasd_term_IO);
EXPORT_SYMBOL_GPL(dasd_generic_probe);
EXPORT_SYMBOL_GPL(dasd_generic_remove);
EXPORT_SYMBOL_GPL(dasd_generic_notify);
EXPORT_SYMBOL_GPL(dasd_generic_set_online);
EXPORT_SYMBOL_GPL(dasd_generic_set_offline);
EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change);
EXPORT_SYMBOL_GPL(dasd_flush_device_queue);
EXPORT_SYMBOL_GPL(dasd_alloc_block);
EXPORT_SYMBOL_GPL(dasd_free_block);