/*
 * File...........: linux/drivers/s390/block/dasd.c
 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
 *		    Horst Hummel <Horst.Hummel@de.ibm.com>
 *		    Carsten Otte <Cotte@de.ibm.com>
 *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
 * Bugreports.to..: <Linux390@de.ibm.com>
 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001
 *
 */

#define KMSG_COMPONENT "dasd"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kmod.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ctype.h>
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/hdreg.h>
#include <linux/async.h>

#include <asm/ccwdev.h>
#include <asm/ebcdic.h>
#include <asm/idals.h>
#include <asm/todclk.h>
#include <asm/itcw.h>

/* This is ugly... */
#define PRINTK_HEADER "dasd:"

#include "dasd_int.h"

/*
 * SECTION: Constant definitions to be used within this file
 */
#define DASD_CHANQ_MAX_SIZE 4

/*
 * SECTION: exported variables of dasd.c
 */
debug_info_t *dasd_debug_area;
struct dasd_discipline *dasd_diag_discipline_pointer;
void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);

MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>");
MODULE_DESCRIPTION("Linux on S/390 DASD device driver,"
		   " Copyright 2000 IBM Corporation");
MODULE_SUPPORTED_DEVICE("dasd");
MODULE_LICENSE("GPL");
/*
 * SECTION: prototypes for static functions of dasd.c
 */
static int dasd_alloc_queue(struct dasd_block *);
static void dasd_setup_queue(struct dasd_block *);
static void dasd_free_queue(struct dasd_block *);
static void dasd_flush_request_queue(struct dasd_block *);
static int dasd_flush_block_queue(struct dasd_block *);
static void dasd_device_tasklet(struct dasd_device *);
static void dasd_block_tasklet(struct dasd_block *);
static void do_kick_device(struct work_struct *);
static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);
static void dasd_device_timeout(unsigned long);
static void dasd_block_timeout(unsigned long);

/*
 * SECTION: Operations on the device structure.
 */
static wait_queue_head_t dasd_init_waitq;
static wait_queue_head_t dasd_flush_wq;
static wait_queue_head_t generic_waitq;

/*
 * Allocate memory for a new device structure.
 */
struct dasd_device *dasd_alloc_device(void)
{
	struct dasd_device *device;

	device = kzalloc(sizeof(struct dasd_device), GFP_ATOMIC);
	if (!device)
		return ERR_PTR(-ENOMEM);
	/* Get two pages for normal block device operations. */
	device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
	if (!device->ccw_mem) {
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}
	/* Get one page for error recovery. */
	device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA);
	if (!device->erp_mem) {
		free_pages((unsigned long) device->ccw_mem, 1);
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}

	dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2);
	dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE);
	spin_lock_init(&device->mem_lock);
	atomic_set(&device->tasklet_scheduled, 0);
	tasklet_init(&device->tasklet,
		     (void (*)(unsigned long)) dasd_device_tasklet,
		     (unsigned long) device);
	INIT_LIST_HEAD(&device->ccw_queue);
	init_timer(&device->timer);
	device->timer.function = dasd_device_timeout;
	device->timer.data = (unsigned long) device;
	INIT_WORK(&device->kick_work, do_kick_device);
	device->state = DASD_STATE_NEW;
	device->target = DASD_STATE_NEW;

	return device;
}
/*
 * Free memory of a device structure.
 */
void dasd_free_device(struct dasd_device *device)
{
	kfree(device->private);
	free_page((unsigned long) device->erp_mem);
	free_pages((unsigned long) device->ccw_mem, 1);
	kfree(device);
}

/*
 * Allocate memory for a new block structure.
 */
struct dasd_block *dasd_alloc_block(void)
{
	struct dasd_block *block;

	block = kzalloc(sizeof(*block), GFP_ATOMIC);
	if (!block)
		return ERR_PTR(-ENOMEM);
	/* open_count = 0 means device online but not in use */
	atomic_set(&block->open_count, -1);

	spin_lock_init(&block->request_queue_lock);
	atomic_set(&block->tasklet_scheduled, 0);
	tasklet_init(&block->tasklet,
		     (void (*)(unsigned long)) dasd_block_tasklet,
		     (unsigned long) block);
	INIT_LIST_HEAD(&block->ccw_queue);
	spin_lock_init(&block->queue_lock);
	init_timer(&block->timer);
	block->timer.function = dasd_block_timeout;
	block->timer.data = (unsigned long) block;

	return block;
}

/*
 * Free memory of a block structure.
 */
void dasd_free_block(struct dasd_block *block)
{
	kfree(block);
}
/*
 * Make a new device known to the system.
 */
static int dasd_state_new_to_known(struct dasd_device *device)
{
	int rc;

	/*
	 * As long as the device is not in state DASD_STATE_NEW we want to
	 * keep the reference count > 0.
	 */
	dasd_get_device(device);

	if (device->block) {
		rc = dasd_alloc_queue(device->block);
		if (rc) {
			dasd_put_device(device);
			return rc;
		}
	}
	device->state = DASD_STATE_KNOWN;
	return 0;
}

/*
 * Let the system forget about a device.
 */
static int dasd_state_known_to_new(struct dasd_device *device)
{
	/* Disable extended error reporting for this device. */
	dasd_eer_disable(device);
	/* Forget the discipline information. */
	if (device->discipline) {
		if (device->discipline->uncheck_device)
			device->discipline->uncheck_device(device);
		module_put(device->discipline->owner);
	}
	device->discipline = NULL;
	if (device->base_discipline)
		module_put(device->base_discipline->owner);
	device->base_discipline = NULL;
	device->state = DASD_STATE_NEW;

	if (device->block)
		dasd_free_queue(device->block);

	/* Give up reference we took in dasd_state_new_to_known. */
	dasd_put_device(device);
	return 0;
}
/*
 * Request the irq line for the device.
 */
static int dasd_state_known_to_basic(struct dasd_device *device)
{
	int rc;

	/* Allocate and register gendisk structure. */
	if (device->block) {
		rc = dasd_gendisk_alloc(device->block);
		if (rc)
			return rc;
	}
	/* register 'device' debug area, used for all DBF_DEV_XXX calls */
	device->debug_area = debug_register(dev_name(&device->cdev->dev), 4, 1,
					    8 * sizeof(long));
	debug_register_view(device->debug_area, &debug_sprintf_view);
	debug_set_level(device->debug_area, DBF_WARNING);
	DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created");

	device->state = DASD_STATE_BASIC;
	return 0;
}

/*
 * Release the irq line for the device. Terminate any running i/o.
 */
static int dasd_state_basic_to_known(struct dasd_device *device)
{
	int rc;

	if (device->block) {
		dasd_gendisk_free(device->block);
		dasd_block_clear_timer(device->block);
	}
	rc = dasd_flush_device_queue(device);
	if (rc)
		return rc;
	dasd_device_clear_timer(device);

	DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device);
	if (device->debug_area != NULL) {
		debug_unregister(device->debug_area);
		device->debug_area = NULL;
	}
	device->state = DASD_STATE_KNOWN;
	return 0;
}
/*
 * Do the initial analysis. The do_analysis function may return
 * -EAGAIN in which case the device keeps the state DASD_STATE_BASIC
 * until the discipline decides to continue the startup sequence
 * by calling the function dasd_change_state. The eckd discipline
 * uses this to start a ccw that detects the format. The completion
 * interrupt for this detection ccw uses the kernel event daemon to
 * trigger the call to dasd_change_state. All this is done in the
 * discipline code, see dasd_eckd.c.
 * After the analysis ccw is done (do_analysis returned 0) the block
 * device is set up.
 * In case the analysis returns an error, the device setup is stopped
 * (a fake disk was already added to allow formatting).
 */
static int dasd_state_basic_to_ready(struct dasd_device *device)
{
	int rc;
	struct dasd_block *block;

	rc = 0;
	block = device->block;
	/* make disk known with correct capacity */
	if (block) {
		if (block->base->discipline->do_analysis != NULL)
			rc = block->base->discipline->do_analysis(block);
		if (rc) {
			if (rc != -EAGAIN)
				device->state = DASD_STATE_UNFMT;
			return rc;
		}
		dasd_setup_queue(block);
		set_capacity(block->gdp,
			     block->blocks << block->s2b_shift);
		device->state = DASD_STATE_READY;
		rc = dasd_scan_partitions(block);
		if (rc)
			device->state = DASD_STATE_BASIC;
	} else {
		device->state = DASD_STATE_READY;
	}
	return rc;
}
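
/*
 * Illustrative sketch (an assumption added for clarity, not code taken
 * from dasd_eckd.c): when do_analysis returned -EAGAIN, the discipline's
 * completion handler for the detection ccw would typically resume the
 * startup from the kernel event daemon, e.g.
 *
 *	dasd_kick_device(device);
 *
 * which schedules do_kick_device() and thereby re-enters
 * dasd_change_state() once the format information is available.
 */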
/*
 * Remove device from block device layer. Destroy dirty buffers.
 * Forget format information. Check if the target level is basic
 * and if it is create fake disk for formatting.
 */
static int dasd_state_ready_to_basic(struct dasd_device *device)
{
	int rc;

	device->state = DASD_STATE_BASIC;
	if (device->block) {
		struct dasd_block *block = device->block;
		rc = dasd_flush_block_queue(block);
		if (rc) {
			device->state = DASD_STATE_READY;
			return rc;
		}
		dasd_destroy_partitions(block);
		dasd_flush_request_queue(block);
		block->blocks = 0;
		block->bp_block = 0;
		block->s2b_shift = 0;
	}
	return 0;
}

/*
 * Back to basic.
 */
static int dasd_state_unfmt_to_basic(struct dasd_device *device)
{
	device->state = DASD_STATE_BASIC;
	return 0;
}
/*
 * Make the device online and schedule the bottom half to start
 * the requeueing of requests from the linux request queue to the
 * ccw queue.
 */
static int
dasd_state_ready_to_online(struct dasd_device *device)
{
	int rc;
	struct gendisk *disk;
	struct disk_part_iter piter;
	struct hd_struct *part;

	if (device->discipline->ready_to_online) {
		rc = device->discipline->ready_to_online(device);
		if (rc)
			return rc;
	}
	device->state = DASD_STATE_ONLINE;
	if (device->block) {
		dasd_schedule_block_bh(device->block);
		disk = device->block->bdev->bd_disk;
		disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
		while ((part = disk_part_iter_next(&piter)))
			kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE);
		disk_part_iter_exit(&piter);
	}
	return 0;
}

/*
 * Stop the requeueing of requests again.
 */
static int dasd_state_online_to_ready(struct dasd_device *device)
{
	int rc;
	struct gendisk *disk;
	struct disk_part_iter piter;
	struct hd_struct *part;

	if (device->discipline->online_to_ready) {
		rc = device->discipline->online_to_ready(device);
		if (rc)
			return rc;
	}
	device->state = DASD_STATE_READY;
	if (device->block) {
		disk = device->block->bdev->bd_disk;
		disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
		while ((part = disk_part_iter_next(&piter)))
			kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE);
		disk_part_iter_exit(&piter);
	}
	return 0;
}
/*
 * Device startup state changes.
 */
static int dasd_increase_state(struct dasd_device *device)
{
	int rc;

	rc = 0;
	if (device->state == DASD_STATE_NEW &&
	    device->target >= DASD_STATE_KNOWN)
		rc = dasd_state_new_to_known(device);

	if (!rc &&
	    device->state == DASD_STATE_KNOWN &&
	    device->target >= DASD_STATE_BASIC)
		rc = dasd_state_known_to_basic(device);

	if (!rc &&
	    device->state == DASD_STATE_BASIC &&
	    device->target >= DASD_STATE_READY)
		rc = dasd_state_basic_to_ready(device);

	if (!rc &&
	    device->state == DASD_STATE_UNFMT &&
	    device->target > DASD_STATE_UNFMT)
		rc = -EPERM;

	if (!rc &&
	    device->state == DASD_STATE_READY &&
	    device->target >= DASD_STATE_ONLINE)
		rc = dasd_state_ready_to_online(device);

	return rc;
}

/*
 * Device shutdown state changes.
 */
static int dasd_decrease_state(struct dasd_device *device)
{
	int rc;

	rc = 0;
	if (device->state == DASD_STATE_ONLINE &&
	    device->target <= DASD_STATE_READY)
		rc = dasd_state_online_to_ready(device);

	if (!rc &&
	    device->state == DASD_STATE_READY &&
	    device->target <= DASD_STATE_BASIC)
		rc = dasd_state_ready_to_basic(device);

	if (!rc &&
	    device->state == DASD_STATE_UNFMT &&
	    device->target <= DASD_STATE_BASIC)
		rc = dasd_state_unfmt_to_basic(device);

	if (!rc &&
	    device->state == DASD_STATE_BASIC &&
	    device->target <= DASD_STATE_KNOWN)
		rc = dasd_state_basic_to_known(device);

	if (!rc &&
	    device->state == DASD_STATE_KNOWN &&
	    device->target <= DASD_STATE_NEW)
		rc = dasd_state_known_to_new(device);

	return rc;
}
/*
 * This is the main startup/shutdown routine.
 */
static void dasd_change_state(struct dasd_device *device)
{
	int rc;

	if (device->state == device->target)
		/* Already where we want to go today... */
		return;
	if (device->state < device->target)
		rc = dasd_increase_state(device);
	else
		rc = dasd_decrease_state(device);
	if (rc && rc != -EAGAIN)
		device->target = device->state;

	if (device->state == device->target) {
		wake_up(&dasd_init_waitq);
		dasd_put_device(device);
	}

	/* let user-space know that the device status changed */
	kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE);
}
/*
 * Kick starter for devices that did not complete the startup/shutdown
 * procedure or were sleeping because of a pending state.
 * dasd_kick_device will schedule a call to do_kick_device on the kernel
 * event daemon.
 */
static void do_kick_device(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device, kick_work);
	dasd_change_state(device);
	dasd_schedule_device_bh(device);
	dasd_put_device(device);
}

void dasd_kick_device(struct dasd_device *device)
{
	dasd_get_device(device);
	/* queue call to dasd_kick_device to the kernel event daemon. */
	schedule_work(&device->kick_work);
}
/*
 * Set the target state for a device and start the state change.
 */
void dasd_set_target_state(struct dasd_device *device, int target)
{
	dasd_get_device(device);
	/* If we are in probeonly mode stop at DASD_STATE_READY. */
	if (dasd_probeonly && target > DASD_STATE_READY)
		target = DASD_STATE_READY;
	if (device->target != target) {
		if (device->state == target) {
			wake_up(&dasd_init_waitq);
			dasd_put_device(device);
		}
		device->target = target;
	}
	if (device->state != device->target)
		dasd_change_state(device);
}

/*
 * Enable devices with device numbers in [from..to].
 */
static inline int _wait_for_device(struct dasd_device *device)
{
	return (device->state == device->target);
}

void dasd_enable_device(struct dasd_device *device)
{
	dasd_set_target_state(device, DASD_STATE_ONLINE);
	if (device->state <= DASD_STATE_KNOWN)
		/* No discipline for device found. */
		dasd_set_target_state(device, DASD_STATE_NEW);
	/* Now wait for the devices to come up. */
	wait_event(dasd_init_waitq, _wait_for_device(device));
}
/*
 * SECTION: device operation (interrupt handler, start i/o, term i/o ...)
 */
#ifdef CONFIG_DASD_PROFILE

struct dasd_profile_info_t dasd_global_profile;
unsigned int dasd_profile_level = DASD_PROFILE_OFF;

/*
 * Increments counter in global and local profiling structures.
 */
#define dasd_profile_counter(value, counter, block) \
{ \
	int index; \
	for (index = 0; index < 31 && value >> (2+index); index++); \
	dasd_global_profile.counter[index]++; \
	block->profile.counter[index]++; \
}
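
/*
 * Worked example of the bucketing above (informational note, not part of
 * the original source): the loop stops at the first index for which
 * value >> (2 + index) becomes zero, i.e. roughly index = log2(value) - 1.
 * A value of 3 is therefore counted in bucket 0, 8 in bucket 2 and 1000
 * in bucket 8, giving a logarithmic histogram with at most 32 buckets.
 */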
/*
 * Add profiling information for cqr before execution.
 */
static void dasd_profile_start(struct dasd_block *block,
			       struct dasd_ccw_req *cqr,
			       struct request *req)
{
	struct list_head *l;
	unsigned int counter;

	if (dasd_profile_level != DASD_PROFILE_ON)
		return;

	/* count the length of the chanq for statistics */
	counter = 0;
	list_for_each(l, &block->ccw_queue)
		if (++counter >= 31)
			break;
	dasd_global_profile.dasd_io_nr_req[counter]++;
	block->profile.dasd_io_nr_req[counter]++;
}

/*
 * Add profiling information for cqr after execution.
 */
static void dasd_profile_end(struct dasd_block *block,
			     struct dasd_ccw_req *cqr,
			     struct request *req)
{
	long strtime, irqtime, endtime, tottime;	/* in microseconds */
	long tottimeps, sectors;

	if (dasd_profile_level != DASD_PROFILE_ON)
		return;

	sectors = blk_rq_sectors(req);
	if (!cqr->buildclk || !cqr->startclk ||
	    !cqr->stopclk || !cqr->endclk ||
	    !sectors)
		return;

	strtime = ((cqr->startclk - cqr->buildclk) >> 12);
	irqtime = ((cqr->stopclk - cqr->startclk) >> 12);
	endtime = ((cqr->endclk - cqr->stopclk) >> 12);
	tottime = ((cqr->endclk - cqr->buildclk) >> 12);
	tottimeps = tottime / sectors;

	if (!dasd_global_profile.dasd_io_reqs)
		memset(&dasd_global_profile, 0,
		       sizeof(struct dasd_profile_info_t));
	dasd_global_profile.dasd_io_reqs++;
	dasd_global_profile.dasd_io_sects += sectors;

	if (!block->profile.dasd_io_reqs)
		memset(&block->profile, 0,
		       sizeof(struct dasd_profile_info_t));
	block->profile.dasd_io_reqs++;
	block->profile.dasd_io_sects += sectors;

	dasd_profile_counter(sectors, dasd_io_secs, block);
	dasd_profile_counter(tottime, dasd_io_times, block);
	dasd_profile_counter(tottimeps, dasd_io_timps, block);
	dasd_profile_counter(strtime, dasd_io_time1, block);
	dasd_profile_counter(irqtime, dasd_io_time2, block);
	dasd_profile_counter(irqtime / sectors, dasd_io_time2ps, block);
	dasd_profile_counter(endtime, dasd_io_time3, block);
}
#else
#define dasd_profile_start(block, cqr, req) do {} while (0)
#define dasd_profile_end(block, cqr, req) do {} while (0)
#endif				/* CONFIG_DASD_PROFILE */
/*
 * Allocate memory for a channel program with 'cplength' channel
 * command words and 'datasize' additional space. There are two
 * variants: 1) dasd_kmalloc_request uses kmalloc to get the needed
 * memory and 2) dasd_smalloc_request uses the static ccw memory
 * that gets allocated for each device.
 */
struct dasd_ccw_req *dasd_kmalloc_request(char *magic, int cplength,
					  int datasize,
					  struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;

	/* Sanity checks */
	BUG_ON(magic == NULL || datasize > PAGE_SIZE ||
	       (cplength * sizeof(struct ccw1)) > PAGE_SIZE);

	cqr = kzalloc(sizeof(struct dasd_ccw_req), GFP_ATOMIC);
	if (cqr == NULL)
		return ERR_PTR(-ENOMEM);
	cqr->cpaddr = NULL;
	if (cplength > 0) {
		cqr->cpaddr = kcalloc(cplength, sizeof(struct ccw1),
				      GFP_ATOMIC | GFP_DMA);
		if (cqr->cpaddr == NULL) {
			kfree(cqr);
			return ERR_PTR(-ENOMEM);
		}
	}
	cqr->data = NULL;
	if (datasize > 0) {
		cqr->data = kzalloc(datasize, GFP_ATOMIC | GFP_DMA);
		if (cqr->data == NULL) {
			kfree(cqr->cpaddr);
			kfree(cqr);
			return ERR_PTR(-ENOMEM);
		}
	}
	strncpy((char *) &cqr->magic, magic, 4);
	ASCEBC((char *) &cqr->magic, 4);
	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	dasd_get_device(device);
	return cqr;
}
struct dasd_ccw_req *dasd_smalloc_request(char *magic, int cplength,
					  int datasize,
					  struct dasd_device *device)
{
	unsigned long flags;
	struct dasd_ccw_req *cqr;
	char *data;
	int size;

	/* Sanity checks */
	BUG_ON(magic == NULL || datasize > PAGE_SIZE ||
	       (cplength * sizeof(struct ccw1)) > PAGE_SIZE);

	size = (sizeof(struct dasd_ccw_req) + 7L) & -8L;
	if (cplength > 0)
		size += cplength * sizeof(struct ccw1);
	if (datasize > 0)
		size += datasize;
	spin_lock_irqsave(&device->mem_lock, flags);
	cqr = (struct dasd_ccw_req *)
		dasd_alloc_chunk(&device->ccw_chunks, size);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	if (cqr == NULL)
		return ERR_PTR(-ENOMEM);
	memset(cqr, 0, sizeof(struct dasd_ccw_req));
	data = (char *) cqr + ((sizeof(struct dasd_ccw_req) + 7L) & -8L);
	cqr->cpaddr = NULL;
	if (cplength > 0) {
		cqr->cpaddr = (struct ccw1 *) data;
		data += cplength * sizeof(struct ccw1);
		memset(cqr->cpaddr, 0, cplength * sizeof(struct ccw1));
	}
	cqr->data = NULL;
	if (datasize > 0) {
		cqr->data = data;
		memset(cqr->data, 0, datasize);
	}
	strncpy((char *) &cqr->magic, magic, 4);
	ASCEBC((char *) &cqr->magic, 4);
	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	dasd_get_device(device);
	return cqr;
}
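
/*
 * Usage sketch (illustrative only, not part of the driver): a discipline
 * would typically pair the allocation with the matching free routine, e.g.
 * for a request with one ccw and 32 bytes of payload taken from the static
 * per-device ccw memory. The magic string below is a placeholder.
 *
 *	struct dasd_ccw_req *cqr;
 *
 *	cqr = dasd_smalloc_request("EXMP", 1, 32, device);
 *	if (IS_ERR(cqr))
 *		return PTR_ERR(cqr);
 *	... build the channel program in cqr->cpaddr and cqr->data ...
 *	dasd_sfree_request(cqr, device);
 */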
/*
 * Free memory of a channel program. This function needs to free all the
 * idal lists that might have been created by dasd_set_cda and the
 * struct dasd_ccw_req itself.
 */
void dasd_kfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
{
#ifdef CONFIG_64BIT
	struct ccw1 *ccw;

	/* Clear any idals used for the request. */
	ccw = cqr->cpaddr;
	do {
		clear_normalized_cda(ccw);
	} while (ccw++->flags & (CCW_FLAG_CC | CCW_FLAG_DC));
#endif
	kfree(cqr->cpaddr);
	kfree(cqr->data);
	kfree(cqr);
	dasd_put_device(device);
}

void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
{
	unsigned long flags;

	spin_lock_irqsave(&device->mem_lock, flags);
	dasd_free_chunk(&device->ccw_chunks, cqr);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	dasd_put_device(device);
}
/*
 * Check discipline magic in cqr.
 */
static inline int dasd_check_cqr(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;

	if (cqr == NULL)
		return -EINVAL;
	device = cqr->startdev;
	if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) {
		DBF_DEV_EVENT(DBF_WARNING, device,
			      " dasd_ccw_req 0x%08x magic doesn't match"
			      " discipline 0x%08x",
			      cqr->magic,
			      *(unsigned int *) device->discipline->name);
		return -EINVAL;
	}
	return 0;
}
/*
 * Terminate the current i/o and set the request to clear_pending.
 * Timer keeps device running.
 * ccw_device_clear can fail if the i/o subsystem
 * is in a bad mood.
 */
int dasd_term_IO(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int retries, rc;
	char errorstring[ERRORLENGTH];

	/* Check the cqr */
	rc = dasd_check_cqr(cqr);
	if (rc)
		return rc;
	retries = 0;
	device = (struct dasd_device *) cqr->startdev;
	while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) {
		rc = ccw_device_clear(device->cdev, (long) cqr);
		switch (rc) {
		case 0:	/* termination successful */
			cqr->retries--;
			cqr->status = DASD_CQR_CLEAR_PENDING;
			cqr->stopclk = get_clock();
			cqr->starttime = 0;
			DBF_DEV_EVENT(DBF_DEBUG, device,
				      "terminate cqr %p successful",
				      cqr);
			break;
		case -ENODEV:
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "device gone, retry");
			break;
		case -EIO:
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "I/O error, retry");
			break;
		case -EINVAL:
		case -EBUSY:
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "device busy, retry later");
			break;
		default:
			/* internal error 10 - unknown rc*/
			snprintf(errorstring, ERRORLENGTH, "10 %d", rc);
			dev_err(&device->cdev->dev, "An error occurred in the "
				"DASD device driver, reason=%s\n", errorstring);
			BUG();
			break;
		}
		retries++;
	}
	dasd_schedule_device_bh(device);
	return rc;
}
/*
 * Start the i/o. This start_IO can fail if the channel is really busy.
 * In that case set up a timer to start the request later.
 */
int dasd_start_IO(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int rc;
	char errorstring[ERRORLENGTH];

	/* Check the cqr */
	rc = dasd_check_cqr(cqr);
	if (rc)
		return rc;
	device = (struct dasd_device *) cqr->startdev;
	if (cqr->retries < 0) {
		/* internal error 14 - start_IO run out of retries */
		sprintf(errorstring, "14 %p", cqr);
		dev_err(&device->cdev->dev, "An error occurred in the DASD "
			"device driver, reason=%s\n", errorstring);
		cqr->status = DASD_CQR_ERROR;
		return -EIO;
	}
	cqr->startclk = get_clock();
	cqr->starttime = jiffies;
	cqr->retries--;
	if (cqr->cpmode == 1) {
		rc = ccw_device_tm_start(device->cdev, cqr->cpaddr,
					 (long) cqr, cqr->lpm);
	} else {
		rc = ccw_device_start(device->cdev, cqr->cpaddr,
				      (long) cqr, cqr->lpm, 0);
	}
	switch (rc) {
	case 0:
		cqr->status = DASD_CQR_IN_IO;
		DBF_DEV_EVENT(DBF_DEBUG, device,
			      "start_IO: request %p started successful",
			      cqr);
		break;
	case -EBUSY:
		DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
			      "start_IO: device busy, retry later");
		break;
	case -ETIMEDOUT:
		DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
			      "start_IO: request timeout, retry later");
		break;
	case -EACCES:
		/* -EACCES indicates that the request used only a
		 * subset of the available paths and all these
		 * paths are gone.
		 * Do a retry with all available paths.
		 */
		cqr->lpm = LPM_ANYPATH;
		DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
			      "start_IO: selected paths gone, "
			      "retry on all paths");
		break;
	case -ENODEV:
		DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
			      "start_IO: -ENODEV device gone, retry");
		break;
	case -EIO:
		DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
			      "start_IO: -EIO device gone, retry");
		break;
	default:
		/* internal error 11 - unknown rc */
		snprintf(errorstring, ERRORLENGTH, "11 %d", rc);
		dev_err(&device->cdev->dev,
			"An error occurred in the DASD device driver, "
			"reason=%s\n", errorstring);
		BUG();
		break;
	}
	return rc;
}
/*
 * Timeout function for dasd devices. This is used for different purposes
 *  1) missing interrupt handler for normal operation
 *  2) delayed start of request where start_IO failed with -EBUSY
 *  3) timeout for missing state change interrupts
 * The head of the ccw queue will have status DASD_CQR_IN_IO for 1),
 * DASD_CQR_QUEUED for 2) and 3).
 */
static void dasd_device_timeout(unsigned long ptr)
{
	unsigned long flags;
	struct dasd_device *device;

	device = (struct dasd_device *) ptr;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	/* re-activate request queue */
	device->stopped &= ~DASD_STOPPED_PENDING;
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	dasd_schedule_device_bh(device);
}

/*
 * Setup timeout for a device in jiffies.
 */
void dasd_device_set_timer(struct dasd_device *device, int expires)
{
	if (expires == 0)
		del_timer(&device->timer);
	else
		mod_timer(&device->timer, jiffies + expires);
}

/*
 * Clear timeout for a device.
 */
void dasd_device_clear_timer(struct dasd_device *device)
{
	del_timer(&device->timer);
}
static void dasd_handle_killed_request(struct ccw_device *cdev,
				       unsigned long intparm)
{
	struct dasd_ccw_req *cqr;
	struct dasd_device *device;

	if (!intparm)
		return;
	cqr = (struct dasd_ccw_req *) intparm;
	if (cqr->status != DASD_CQR_IN_IO) {
		DBF_EVENT(DBF_DEBUG,
			  "invalid status in handle_killed_request: "
			  "bus_id %s, status %02x",
			  dev_name(&cdev->dev), cqr->status);
		return;
	}

	device = (struct dasd_device *) cqr->startdev;
	if (device == NULL ||
	    device != dasd_device_from_cdev_locked(cdev) ||
	    strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
		DBF_DEV_EVENT(DBF_DEBUG, device, "invalid device in request: "
			      "bus_id %s", dev_name(&cdev->dev));
		return;
	}

	/* Schedule request to be retried. */
	cqr->status = DASD_CQR_QUEUED;

	dasd_device_clear_timer(device);
	dasd_schedule_device_bh(device);
	dasd_put_device(device);
}

void dasd_generic_handle_state_change(struct dasd_device *device)
{
	/* First of all start sense subsystem status request. */
	dasd_eer_snss(device);

	device->stopped &= ~DASD_STOPPED_PENDING;
	dasd_schedule_device_bh(device);
	if (device->block)
		dasd_schedule_block_bh(device->block);
}
/*
 * Interrupt handler for "normal" ssch-io based dasd devices.
 */
void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
		      struct irb *irb)
{
	struct dasd_ccw_req *cqr, *next;
	struct dasd_device *device;
	unsigned long long now;
	int expires;

	if (IS_ERR(irb)) {
		switch (PTR_ERR(irb)) {
		case -EIO:
			break;
		case -ETIMEDOUT:
			DBF_EVENT(DBF_WARNING, "%s(%s): request timed out\n",
				  __func__, dev_name(&cdev->dev));
			break;
		default:
			DBF_EVENT(DBF_WARNING, "%s(%s): unknown error %ld\n",
				  __func__, dev_name(&cdev->dev), PTR_ERR(irb));
		}
		dasd_handle_killed_request(cdev, intparm);
		return;
	}

	now = get_clock();

	/* check for unsolicited interrupts */
	cqr = (struct dasd_ccw_req *) intparm;
	if (!cqr || ((scsw_cc(&irb->scsw) == 1) &&
		     (scsw_fctl(&irb->scsw) & SCSW_FCTL_START_FUNC) &&
		     (scsw_stctl(&irb->scsw) & SCSW_STCTL_STATUS_PEND))) {
		if (cqr && cqr->status == DASD_CQR_IN_IO)
			cqr->status = DASD_CQR_QUEUED;
		device = dasd_device_from_cdev_locked(cdev);
		if (!IS_ERR(device)) {
			dasd_device_clear_timer(device);
			device->discipline->handle_unsolicited_interrupt(device,
									 irb);
			dasd_put_device(device);
		}
		return;
	}

	device = (struct dasd_device *) cqr->startdev;
	if (!device ||
	    strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
		DBF_DEV_EVENT(DBF_DEBUG, device, "invalid device in request: "
			      "bus_id %s", dev_name(&cdev->dev));
		return;
	}

	/* Check for clear pending */
	if (cqr->status == DASD_CQR_CLEAR_PENDING &&
	    scsw_fctl(&irb->scsw) & SCSW_FCTL_CLEAR_FUNC) {
		cqr->status = DASD_CQR_CLEARED;
		dasd_device_clear_timer(device);
		wake_up(&dasd_flush_wq);
		dasd_schedule_device_bh(device);
		return;
	}

	/* check status - the request might have been killed by dyn detach */
	if (cqr->status != DASD_CQR_IN_IO) {
		DBF_DEV_EVENT(DBF_DEBUG, device, "invalid status: bus_id %s, "
			      "status %02x", dev_name(&cdev->dev), cqr->status);
		return;
	}

	next = NULL;
	expires = 0;
	if (scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
	    scsw_cstat(&irb->scsw) == 0) {
		/* request was completed successfully */
		cqr->status = DASD_CQR_SUCCESS;
		cqr->stopclk = now;
		/* Start first request on queue if possible -> fast_io. */
		if (cqr->devlist.next != &device->ccw_queue) {
			next = list_entry(cqr->devlist.next,
					  struct dasd_ccw_req, devlist);
		}
	} else {  /* error */
		memcpy(&cqr->irb, irb, sizeof(struct irb));
		/* log sense for every failed I/O to s390 debugfeature */
		dasd_log_sense_dbf(cqr, irb);
		if (device->features & DASD_FEATURE_ERPLOG) {
			dasd_log_sense(cqr, irb);
		}

		/*
		 * If we don't want complex ERP for this request, then just
		 * reset this and retry it in the fastpath
		 */
		if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) &&
		    cqr->retries > 0) {
			if (cqr->lpm == LPM_ANYPATH)
				DBF_DEV_EVENT(DBF_DEBUG, device,
					      "default ERP in fastpath "
					      "(%i retries left)",
					      cqr->retries);
			cqr->lpm = LPM_ANYPATH;
			cqr->status = DASD_CQR_QUEUED;
			next = cqr;
		} else
			cqr->status = DASD_CQR_ERROR;
	}
	if (next && (next->status == DASD_CQR_QUEUED) &&
	    (!device->stopped)) {
		if (device->discipline->start_IO(next) == 0)
			expires = next->expires;
	}
	if (expires != 0)
		dasd_device_set_timer(device, expires);
	else
		dasd_device_clear_timer(device);
	dasd_schedule_device_bh(device);
}
/*
 * If we have an error on a dasd_block layer request then we cancel
 * and return all further requests from the same dasd_block as well.
 */
static void __dasd_device_recovery(struct dasd_device *device,
				   struct dasd_ccw_req *ref_cqr)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;

	/*
	 * only requeue request that came from the dasd_block layer
	 */
	if (!ref_cqr->block)
		return;

	list_for_each_safe(l, n, &device->ccw_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, devlist);
		if (cqr->status == DASD_CQR_QUEUED &&
		    ref_cqr->block == cqr->block) {
			cqr->status = DASD_CQR_CLEARED;
		}
	}
};

/*
 * Remove those ccw requests from the queue that need to be returned
 * to the upper layer.
 */
static void __dasd_device_process_ccw_queue(struct dasd_device *device,
					    struct list_head *final_queue)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;

	/* Process request with final status. */
	list_for_each_safe(l, n, &device->ccw_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, devlist);

		/* Stop list processing at the first non-final request. */
		if (cqr->status == DASD_CQR_QUEUED ||
		    cqr->status == DASD_CQR_IN_IO ||
		    cqr->status == DASD_CQR_CLEAR_PENDING)
			break;
		if (cqr->status == DASD_CQR_ERROR) {
			__dasd_device_recovery(device, cqr);
		}
		/* Rechain finished requests to final queue */
		list_move_tail(&cqr->devlist, final_queue);
	}
}
/*
 * the cqrs from the final queue are returned to the upper layer
 * by setting a dasd_block state and calling the callback function
 */
static void __dasd_device_process_final_queue(struct dasd_device *device,
					       struct list_head *final_queue)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;
	struct dasd_block *block;
	void (*callback)(struct dasd_ccw_req *, void *data);
	void *callback_data;
	char errorstring[ERRORLENGTH];

	list_for_each_safe(l, n, final_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, devlist);
		list_del_init(&cqr->devlist);
		block = cqr->block;
		callback = cqr->callback;
		callback_data = cqr->callback_data;
		if (block)
			spin_lock_bh(&block->queue_lock);
		switch (cqr->status) {
		case DASD_CQR_SUCCESS:
			cqr->status = DASD_CQR_DONE;
			break;
		case DASD_CQR_ERROR:
			cqr->status = DASD_CQR_NEED_ERP;
			break;
		case DASD_CQR_CLEARED:
			cqr->status = DASD_CQR_TERMINATED;
			break;
		default:
			/* internal error 12 - wrong cqr status*/
			snprintf(errorstring, ERRORLENGTH, "12 %p %02x", cqr, cqr->status);
			dev_err(&device->cdev->dev,
				"An error occurred in the DASD device driver, "
				"reason=%s\n", errorstring);
			BUG();
		}
		if (cqr->callback != NULL)
			(callback)(cqr, callback_data);
		if (block)
			spin_unlock_bh(&block->queue_lock);
	}
}
/*
 * Take a look at the first request on the ccw queue and check
 * if it reached its expire time. If so, terminate the IO.
 */
static void __dasd_device_check_expire(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;

	if (list_empty(&device->ccw_queue))
		return;
	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
	if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) &&
	    (time_after_eq(jiffies, cqr->expires + cqr->starttime))) {
		if (device->discipline->term_IO(cqr) != 0) {
			/* Hmpf, try again in 5 sec */
			dev_err(&device->cdev->dev,
				"cqr %p timed out (%is) but cannot be "
				"ended, retrying in 5 s\n",
				cqr, (cqr->expires/HZ));
			cqr->expires += 5*HZ;
			dasd_device_set_timer(device, 5*HZ);
		} else {
			dev_err(&device->cdev->dev,
				"cqr %p timed out (%is), %i retries "
				"remaining\n", cqr, (cqr->expires/HZ),
				cqr->retries);
		}
	}
}
/*
 * Take a look at the first request on the ccw queue and check
 * if it needs to be started.
 */
static void __dasd_device_start_head(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	int rc;

	if (list_empty(&device->ccw_queue))
		return;
	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
	if (cqr->status != DASD_CQR_QUEUED)
		return;
	/* when device is stopped, return request to previous layer */
	if (device->stopped) {
		cqr->status = DASD_CQR_CLEARED;
		dasd_schedule_device_bh(device);
		return;
	}

	rc = device->discipline->start_IO(cqr);
	if (rc == 0)
		dasd_device_set_timer(device, cqr->expires);
	else if (rc == -EACCES) {
		dasd_schedule_device_bh(device);
	} else
		/* Hmpf, try again in 1/2 sec */
		dasd_device_set_timer(device, 50);
}
/*
 * Go through all requests on the dasd_device request queue,
 * terminate them on the cdev if necessary, and return them to the
 * submitting layer via callback.
 * Note:
 * Make sure that all 'submitting layers' still exist when
 * this function is called! In other words, when 'device' is a base
 * device then all block layer requests must have been removed before
 * via dasd_flush_block_queue.
 */
int dasd_flush_device_queue(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr, *n;
	int rc;
	struct list_head flush_queue;

	INIT_LIST_HEAD(&flush_queue);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = 0;
	list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
		/* Check status and move request to flush_queue */
		switch (cqr->status) {
		case DASD_CQR_IN_IO:
			rc = device->discipline->term_IO(cqr);
			if (rc) {
				/* unable to terminate request */
				dev_err(&device->cdev->dev,
					"Flushing the DASD request queue "
					"failed for request %p\n", cqr);
				/* stop flush processing */
				goto finished;
			}
			break;
		case DASD_CQR_QUEUED:
			cqr->stopclk = get_clock();
			cqr->status = DASD_CQR_CLEARED;
			break;
		default: /* no need to modify the others */
			break;
		}
		list_move_tail(&cqr->devlist, &flush_queue);
	}
finished:
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	/*
	 * After this point all requests must be in state CLEAR_PENDING,
	 * CLEARED, SUCCESS or ERROR. Now wait for CLEAR_PENDING to become
	 * one of the others.
	 */
	list_for_each_entry_safe(cqr, n, &flush_queue, devlist)
		wait_event(dasd_flush_wq,
			   (cqr->status != DASD_CQR_CLEAR_PENDING));
	/*
	 * Now set each request back to TERMINATED, DONE or NEED_ERP
	 * and call the callback function of flushed requests
	 */
	__dasd_device_process_final_queue(device, &flush_queue);
	return rc;
}
/*
 * Acquire the device lock and process queues for the device.
 */
static void dasd_device_tasklet(struct dasd_device *device)
{
	struct list_head final_queue;

	atomic_set(&device->tasklet_scheduled, 0);
	INIT_LIST_HEAD(&final_queue);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* Check expire time of first request on the ccw queue. */
	__dasd_device_check_expire(device);
	/* find final requests on ccw queue */
	__dasd_device_process_ccw_queue(device, &final_queue);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	/* Now call the callback function of requests with final status */
	__dasd_device_process_final_queue(device, &final_queue);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* Now check if the head of the ccw queue needs to be started. */
	__dasd_device_start_head(device);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	dasd_put_device(device);
}

/*
 * Schedules a call to dasd_tasklet over the device tasklet.
 */
void dasd_schedule_device_bh(struct dasd_device *device)
{
	/* Protect against rescheduling. */
	if (atomic_cmpxchg(&device->tasklet_scheduled, 0, 1) != 0)
		return;
	dasd_get_device(device);
	tasklet_hi_schedule(&device->tasklet);
}
/*
 * Queue a request to the head of the device ccw_queue.
 * Start the I/O if possible.
 */
void dasd_add_request_head(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	unsigned long flags;

	device = cqr->startdev;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	cqr->status = DASD_CQR_QUEUED;
	list_add(&cqr->devlist, &device->ccw_queue);
	/* let the bh start the request to keep them in order */
	dasd_schedule_device_bh(device);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}

/*
 * Queue a request to the tail of the device ccw_queue.
 * Start the I/O if possible.
 */
void dasd_add_request_tail(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	unsigned long flags;

	device = cqr->startdev;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	cqr->status = DASD_CQR_QUEUED;
	list_add_tail(&cqr->devlist, &device->ccw_queue);
	/* let the bh start the request to keep them in order */
	dasd_schedule_device_bh(device);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}
/*
 * Wakeup helper for the 'sleep_on' functions.
 */
static void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data)
{
	wake_up((wait_queue_head_t *) data);
}

static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int rc;

	device = cqr->startdev;
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = ((cqr->status == DASD_CQR_DONE ||
	       cqr->status == DASD_CQR_NEED_ERP ||
	       cqr->status == DASD_CQR_TERMINATED) &&
	      list_empty(&cqr->devlist));
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	return rc;
}
/*
 * Queue a request to the tail of the device ccw_queue and wait for
 * its completion.
 */
int dasd_sleep_on(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int rc;

	device = cqr->startdev;

	cqr->callback = dasd_wakeup_cb;
	cqr->callback_data = (void *) &generic_waitq;
	dasd_add_request_tail(cqr);
	wait_event(generic_waitq, _wait_for_wakeup(cqr));

	/* Request status is either done or failed. */
	rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
	return rc;
}
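
/*
 * Usage sketch (illustrative only, with a hypothetical build helper that is
 * not part of this file): a typical synchronous caller combines request
 * allocation, submission and release like this:
 *
 *	cqr = my_build_cqr(device);	// hypothetical discipline helper
 *	if (IS_ERR(cqr))
 *		return PTR_ERR(cqr);
 *	rc = dasd_sleep_on(cqr);	// queue, then wait for final status
 *	dasd_sfree_request(cqr, cqr->startdev);
 *	return rc;
 */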
/*
 * Queue a request to the tail of the device ccw_queue and wait
 * interruptible for its completion.
 */
int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int rc;

	device = cqr->startdev;
	cqr->callback = dasd_wakeup_cb;
	cqr->callback_data = (void *) &generic_waitq;
	dasd_add_request_tail(cqr);
	rc = wait_event_interruptible(generic_waitq, _wait_for_wakeup(cqr));
	if (rc == -ERESTARTSYS) {
		dasd_cancel_req(cqr);
		/* wait (non-interruptible) for final status */
		wait_event(generic_waitq, _wait_for_wakeup(cqr));
	}
	rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
	return rc;
}
/*
 * Whoa nelly now it gets really hairy. For some functions (e.g. steal lock
 * for eckd devices) the currently running request has to be terminated
 * and be put back to status queued, before the special request is added
 * to the head of the queue. Then the special request is waited on normally.
 */
static inline int _dasd_term_running_cqr(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;

	if (list_empty(&device->ccw_queue))
		return 0;
	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
	return device->discipline->term_IO(cqr);
}

int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int rc;

	device = cqr->startdev;
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = _dasd_term_running_cqr(device);
	if (rc) {
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		return rc;
	}

	cqr->callback = dasd_wakeup_cb;
	cqr->callback_data = (void *) &generic_waitq;
	cqr->status = DASD_CQR_QUEUED;
	list_add(&cqr->devlist, &device->ccw_queue);

	/* let the bh start the request to keep them in order */
	dasd_schedule_device_bh(device);

	spin_unlock_irq(get_ccwdev_lock(device->cdev));

	wait_event(generic_waitq, _wait_for_wakeup(cqr));

	/* Request status is either done or failed. */
	rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
	return rc;
}
/*
 * Cancels a request that was started with dasd_sleep_on_req.
 * This is useful to time out requests. The request will be
 * terminated if it is currently in i/o.
 * Returns 1 if the request has been terminated.
 *         0 if there was no need to terminate the request (not started yet)
 *         negative error code if termination failed
 * Cancellation of a request is an asynchronous operation! The calling
 * function has to wait until the request is properly returned via callback.
 */
int dasd_cancel_req(struct dasd_ccw_req *cqr)
{
        struct dasd_device *device = cqr->startdev;
        unsigned long flags;
        int rc;

        rc = 0;
        spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
        switch (cqr->status) {
        case DASD_CQR_QUEUED:
                /* request was not started - just set to cleared */
                cqr->status = DASD_CQR_CLEARED;
                break;
        case DASD_CQR_IN_IO:
                /* request in IO - terminate IO and release again */
                rc = device->discipline->term_IO(cqr);
                if (rc) {
                        dev_err(&device->cdev->dev,
                                "Cancelling request %p failed with rc=%d\n",
                                cqr, rc);
                } else {
                        cqr->stopclk = get_clock();
                        rc = 1;
                }
                break;
        default: /* already finished or clear pending - do nothing */
                break;
        }
        spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
        dasd_schedule_device_bh(device);
        return rc;
}
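
/*
 * Illustrative sketch only: because cancellation is asynchronous, a
 * caller that cancels a sleep_on style request must still wait for the
 * callback, exactly as dasd_sleep_on_interruptible() does above:
 *
 *      rc = dasd_cancel_req(cqr);
 *      if (rc >= 0)
 *              wait_event(generic_waitq, _wait_for_wakeup(cqr));
 */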
/*
 * SECTION: Operations of the dasd_block layer.
 */

/*
 * Timeout function for dasd_block. This is used when the block layer
 * is waiting for something that may not come reliably, (e.g. a state
 * change interrupt)
 */
static void dasd_block_timeout(unsigned long ptr)
{
        unsigned long flags;
        struct dasd_block *block;

        block = (struct dasd_block *) ptr;
        spin_lock_irqsave(get_ccwdev_lock(block->base->cdev), flags);
        /* re-activate request queue */
        block->base->stopped &= ~DASD_STOPPED_PENDING;
        spin_unlock_irqrestore(get_ccwdev_lock(block->base->cdev), flags);
        dasd_schedule_block_bh(block);
}

/*
 * Setup timeout for a dasd_block in jiffies.
 */
void dasd_block_set_timer(struct dasd_block *block, int expires)
{
        if (expires == 0)
                del_timer(&block->timer);
        else
                mod_timer(&block->timer, jiffies + expires);
}
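
/*
 * Example (taken from __dasd_process_request_queue() below): stop the
 * device for half a second before retrying request creation:
 *
 *      dasd_block_set_timer(block, HZ / 2);
 *
 * An expires value of 0 cancels a pending block timer.
 */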
/*
 * Clear timeout for a dasd_block.
 */
void dasd_block_clear_timer(struct dasd_block *block)
{
        del_timer(&block->timer);
}

/*
 * Process finished error recovery ccw.
 */
static inline void __dasd_block_process_erp(struct dasd_block *block,
                                            struct dasd_ccw_req *cqr)
{
        dasd_erp_fn_t erp_fn;
        struct dasd_device *device = block->base;

        if (cqr->status == DASD_CQR_DONE)
                DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful");
        else
                dev_err(&device->cdev->dev, "ERP failed for the DASD\n");
        erp_fn = device->discipline->erp_postaction(cqr);
        erp_fn(cqr);
}
/*
 * Fetch requests from the block device queue.
 */
static void __dasd_process_request_queue(struct dasd_block *block)
{
        struct request_queue *queue;
        struct request *req;
        struct dasd_ccw_req *cqr;
        struct dasd_device *basedev;
        unsigned long flags;

        queue = block->request_queue;
        basedev = block->base;
        /* No queue ? Then there is nothing to do. */
        if (queue == NULL)
                return;

        /*
         * We requeue requests from the block device queue to the ccw
         * queue only in two states. In state DASD_STATE_READY the
         * partition detection is done and we need to requeue requests
         * for that. State DASD_STATE_ONLINE is normal block device
         * operation.
         */
        if (basedev->state < DASD_STATE_READY)
                return;
        /* Now we try to fetch requests from the request queue */
        while (!blk_queue_plugged(queue) && (req = blk_peek_request(queue))) {
                if (basedev->features & DASD_FEATURE_READONLY &&
                    rq_data_dir(req) == WRITE) {
                        DBF_DEV_EVENT(DBF_ERR, basedev,
                                      "Rejecting write request %p",
                                      req);
                        blk_start_request(req);
                        __blk_end_request_all(req, -EIO);
                        continue;
                }
                cqr = basedev->discipline->build_cp(basedev, block, req);
                if (IS_ERR(cqr)) {
                        if (PTR_ERR(cqr) == -EBUSY)
                                break;  /* normal end condition */
                        if (PTR_ERR(cqr) == -ENOMEM)
                                break;  /* terminate request queue loop */
                        if (PTR_ERR(cqr) == -EAGAIN) {
                                /*
                                 * The current request cannot be built right
                                 * now, we have to try later. If this request
                                 * is the head-of-queue we stop the device
                                 * for 1/2 second.
                                 */
                                if (!list_empty(&block->ccw_queue))
                                        break;
                                spin_lock_irqsave(get_ccwdev_lock(basedev->cdev), flags);
                                basedev->stopped |= DASD_STOPPED_PENDING;
                                spin_unlock_irqrestore(get_ccwdev_lock(basedev->cdev), flags);
                                dasd_block_set_timer(block, HZ / 2);
                                break;
                        }
                        DBF_DEV_EVENT(DBF_ERR, basedev,
                                      "CCW creation failed (rc=%ld) "
                                      "on request %p",
                                      PTR_ERR(cqr), req);
                        blk_start_request(req);
                        __blk_end_request_all(req, -EIO);
                        continue;
                }
                /*
                 * Note: callback is set to dasd_return_cqr_cb in
                 * __dasd_block_start_head to cover erp requests as well
                 */
                cqr->callback_data = (void *) req;
                cqr->status = DASD_CQR_FILLED;
                blk_start_request(req);
                list_add_tail(&cqr->blocklist, &block->ccw_queue);
                dasd_profile_start(block, cqr, req);
        }
}
static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
{
        struct request *req;
        int status;
        int error = 0;

        req = (struct request *) cqr->callback_data;
        dasd_profile_end(cqr->block, cqr, req);
        status = cqr->block->base->discipline->free_cp(cqr, req);
        if (status <= 0)
                error = status ? status : -EIO;
        __blk_end_request_all(req, error);
}
/*
 * Process ccw request queue.
 */
static void __dasd_process_block_ccw_queue(struct dasd_block *block,
                                           struct list_head *final_queue)
{
        struct list_head *l, *n;
        struct dasd_ccw_req *cqr;
        dasd_erp_fn_t erp_fn;
        unsigned long flags;
        struct dasd_device *base = block->base;

restart:
        /* Process requests with final status. */
        list_for_each_safe(l, n, &block->ccw_queue) {
                cqr = list_entry(l, struct dasd_ccw_req, blocklist);
                if (cqr->status != DASD_CQR_DONE &&
                    cqr->status != DASD_CQR_FAILED &&
                    cqr->status != DASD_CQR_NEED_ERP &&
                    cqr->status != DASD_CQR_TERMINATED)
                        continue;
                if (cqr->status == DASD_CQR_TERMINATED) {
                        base->discipline->handle_terminated_request(cqr);
                        goto restart;
                }
                /* Process requests that may be recovered */
                if (cqr->status == DASD_CQR_NEED_ERP) {
                        erp_fn = base->discipline->erp_action(cqr);
                        erp_fn(cqr);
                        goto restart;
                }

                /* log sense for fatal error */
                if (cqr->status == DASD_CQR_FAILED) {
                        dasd_log_sense(cqr, &cqr->irb);
                }

                /* First of all call extended error reporting. */
                if (dasd_eer_enabled(base) &&
                    cqr->status == DASD_CQR_FAILED) {
                        dasd_eer_write(base, cqr, DASD_EER_FATALERROR);

                        /* restart request */
                        cqr->status = DASD_CQR_FILLED;
                        cqr->retries = 255;
                        spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
                        base->stopped |= DASD_STOPPED_QUIESCE;
                        spin_unlock_irqrestore(get_ccwdev_lock(base->cdev),
                                               flags);
                        goto restart;
                }

                /* Process finished ERP request. */
                if (cqr->refers) {
                        __dasd_block_process_erp(block, cqr);
                        goto restart;
                }

                /* Rechain finished requests to final queue */
                cqr->endclk = get_clock();
                list_move_tail(&cqr->blocklist, final_queue);
        }
}

static void dasd_return_cqr_cb(struct dasd_ccw_req *cqr, void *data)
{
        dasd_schedule_block_bh(cqr->block);
}

static void __dasd_block_start_head(struct dasd_block *block)
{
        struct dasd_ccw_req *cqr;

        if (list_empty(&block->ccw_queue))
                return;
        /* We always begin with the first requests on the queue, as some
         * of previously started requests have to be enqueued on a
         * dasd_device again for error recovery.
         */
        list_for_each_entry(cqr, &block->ccw_queue, blocklist) {
                if (cqr->status != DASD_CQR_FILLED)
                        continue;
                /* Non-temporary stop condition will trigger fail fast */
                if (block->base->stopped & ~DASD_STOPPED_PENDING &&
                    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
                    (!dasd_eer_enabled(block->base))) {
                        cqr->status = DASD_CQR_FAILED;
                        dasd_schedule_block_bh(block);
                        continue;
                }
                /* Don't try to start requests if device is stopped */
                if (block->base->stopped)
                        return;

                /* just a fail safe check, should not happen */
                if (!cqr->startdev)
                        cqr->startdev = block->base;

                /* make sure that the requests we submit find their way back */
                cqr->callback = dasd_return_cqr_cb;

                dasd_add_request_tail(cqr);
        }
}
/*
 * Central dasd_block layer routine. Takes requests from the generic
 * block layer request queue, creates ccw requests, enqueues them on
 * a dasd_device and processes ccw requests that have been returned.
 */
static void dasd_block_tasklet(struct dasd_block *block)
{
        struct list_head final_queue;
        struct list_head *l, *n;
        struct dasd_ccw_req *cqr;

        atomic_set(&block->tasklet_scheduled, 0);
        INIT_LIST_HEAD(&final_queue);
        spin_lock(&block->queue_lock);
        /* Finish off requests on ccw queue */
        __dasd_process_block_ccw_queue(block, &final_queue);
        spin_unlock(&block->queue_lock);
        /* Now call the callback function of requests with final status */
        spin_lock_irq(&block->request_queue_lock);
        list_for_each_safe(l, n, &final_queue) {
                cqr = list_entry(l, struct dasd_ccw_req, blocklist);
                list_del_init(&cqr->blocklist);
                __dasd_cleanup_cqr(cqr);
        }
        spin_lock(&block->queue_lock);
        /* Get new request from the block device request queue */
        __dasd_process_request_queue(block);
        /* Now check if the head of the ccw queue needs to be started. */
        __dasd_block_start_head(block);
        spin_unlock(&block->queue_lock);
        spin_unlock_irq(&block->request_queue_lock);
        dasd_put_device(block->base);
}
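
/*
 * Note on lock nesting (descriptive, derived from the tasklet above and
 * from do_dasd_request() below): whenever both locks are held at the
 * same time, the block layer's request_queue_lock is taken first and
 * the dasd queue_lock is nested inside it.
 */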
static void _dasd_wake_block_flush_cb(struct dasd_ccw_req *cqr, void *data)
{
        wake_up(&dasd_flush_wq);
}

/*
 * Go through all requests on the dasd_block request queue, cancel them
 * on the respective dasd_device, and return them to the generic
 * block layer.
 */
static int dasd_flush_block_queue(struct dasd_block *block)
{
        struct dasd_ccw_req *cqr, *n;
        int rc, i;
        struct list_head flush_queue;

        INIT_LIST_HEAD(&flush_queue);
        spin_lock_bh(&block->queue_lock);
        rc = 0;
restart:
        list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) {
                /* if this request currently owned by a dasd_device cancel it */
                if (cqr->status >= DASD_CQR_QUEUED)
                        rc = dasd_cancel_req(cqr);
                if (rc < 0)
                        break;
                /* Rechain request (including erp chain) so it won't be
                 * touched by the dasd_block_tasklet anymore.
                 * Replace the callback so we notice when the request
                 * is returned from the dasd_device layer.
                 */
                cqr->callback = _dasd_wake_block_flush_cb;
                for (i = 0; cqr != NULL; cqr = cqr->refers, i++)
                        list_move_tail(&cqr->blocklist, &flush_queue);
                if (i > 1)
                        /* moved more than one request - need to restart */
                        goto restart;
        }
        spin_unlock_bh(&block->queue_lock);
        /* Now call the callback function of flushed requests */
restart_cb:
        list_for_each_entry_safe(cqr, n, &flush_queue, blocklist) {
                wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED));
                /* Process finished ERP request. */
                if (cqr->refers) {
                        spin_lock_bh(&block->queue_lock);
                        __dasd_block_process_erp(block, cqr);
                        spin_unlock_bh(&block->queue_lock);
                        /* restart list_for_xx loop since dasd_process_erp
                         * might remove multiple elements */
                        goto restart_cb;
                }
                /* call the callback function */
                spin_lock_irq(&block->request_queue_lock);
                cqr->endclk = get_clock();
                list_del_init(&cqr->blocklist);
                __dasd_cleanup_cqr(cqr);
                spin_unlock_irq(&block->request_queue_lock);
        }
        return rc;
}

/*
 * Schedules a call to dasd_tasklet over the device tasklet.
 */
void dasd_schedule_block_bh(struct dasd_block *block)
{
        /* Protect against rescheduling. */
        if (atomic_cmpxchg(&block->tasklet_scheduled, 0, 1) != 0)
                return;
        /* life cycle of block is bound to its base device */
        dasd_get_device(block->base);
        tasklet_hi_schedule(&block->tasklet);
}

/*
 * SECTION: external block device operations
 * (request queue handling, open, release, etc.)
 */
/*
 * Dasd request queue function. Called from ll_rw_blk.c
 */
static void do_dasd_request(struct request_queue *queue)
{
        struct dasd_block *block;

        block = queue->queuedata;
        spin_lock(&block->queue_lock);
        /* Get new request from the block device request queue */
        __dasd_process_request_queue(block);
        /* Now check if the head of the ccw queue needs to be started. */
        __dasd_block_start_head(block);
        spin_unlock(&block->queue_lock);
}

/*
 * Allocate and initialize request queue and default I/O scheduler.
 */
static int dasd_alloc_queue(struct dasd_block *block)
{
        int rc;

        block->request_queue = blk_init_queue(do_dasd_request,
                                              &block->request_queue_lock);
        if (block->request_queue == NULL)
                return -ENOMEM;

        block->request_queue->queuedata = block;

        elevator_exit(block->request_queue->elevator);
        /*
         * Reset the elevator pointer after elevator_exit() so that a
         * failing elevator_init() for "deadline" does not lead to a
         * second elevator_exit() call when the queue is cleaned up.
         */
        block->request_queue->elevator = NULL;
        rc = elevator_init(block->request_queue, "deadline");
        if (rc) {
                blk_cleanup_queue(block->request_queue);
                return rc;
        }
        return 0;
}
/*
 * Set up the request queue limits.
 */
static void dasd_setup_queue(struct dasd_block *block)
{
        int max;

        blk_queue_logical_block_size(block->request_queue, block->bp_block);
        max = block->base->discipline->max_blocks << block->s2b_shift;
        blk_queue_max_sectors(block->request_queue, max);
        blk_queue_max_phys_segments(block->request_queue, -1L);
        blk_queue_max_hw_segments(block->request_queue, -1L);
        /* with page sized segments we can translate each segment into
         * one idaw/tidaw */
        blk_queue_max_segment_size(block->request_queue, PAGE_SIZE);
        blk_queue_segment_boundary(block->request_queue, PAGE_SIZE - 1);
        blk_queue_ordered(block->request_queue, QUEUE_ORDERED_DRAIN, NULL);
}
/*
 * Deactivate and free request queue.
 */
static void dasd_free_queue(struct dasd_block *block)
{
        if (block->request_queue) {
                blk_cleanup_queue(block->request_queue);
                block->request_queue = NULL;
        }
}

/*
 * Flush requests on the request queue.
 */
static void dasd_flush_request_queue(struct dasd_block *block)
{
        struct request *req;

        if (!block->request_queue)
                return;

        spin_lock_irq(&block->request_queue_lock);
        while ((req = blk_fetch_request(block->request_queue)))
                __blk_end_request_all(req, -EIO);
        spin_unlock_irq(&block->request_queue_lock);
}
static int dasd_open(struct block_device *bdev, fmode_t mode)
{
        struct dasd_block *block = bdev->bd_disk->private_data;
        struct dasd_device *base = block->base;
        int rc;

        atomic_inc(&block->open_count);
        if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) {
                rc = -ENODEV;
                goto unlock;
        }

        if (!try_module_get(base->discipline->owner)) {
                rc = -EINVAL;
                goto unlock;
        }

        if (dasd_probeonly) {
                dev_info(&base->cdev->dev,
                         "Accessing the DASD failed because it is in "
                         "probeonly mode\n");
                rc = -EPERM;
                goto out;
        }

        if (base->state <= DASD_STATE_BASIC) {
                DBF_DEV_EVENT(DBF_ERR, base, "%s",
                              "Cannot open unrecognized device");
                rc = -ENODEV;
                goto out;
        }

        return 0;

out:
        module_put(base->discipline->owner);
unlock:
        atomic_dec(&block->open_count);
        return rc;
}

static int dasd_release(struct gendisk *disk, fmode_t mode)
{
        struct dasd_block *block = disk->private_data;

        atomic_dec(&block->open_count);
        module_put(block->base->discipline->owner);
        return 0;
}
/*
 * Return disk geometry.
 */
static int dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
        struct dasd_block *block;
        struct dasd_device *base;

        block = bdev->bd_disk->private_data;
        if (!block)
                return -ENODEV;
        base = block->base;

        if (!base->discipline ||
            !base->discipline->fill_geometry)
                return -EINVAL;

        base->discipline->fill_geometry(block, geo);
        geo->start = get_start_sect(bdev) >> block->s2b_shift;
        return 0;
}
struct block_device_operations
dasd_device_operations = {
        .owner          = THIS_MODULE,
        .open           = dasd_open,
        .release        = dasd_release,
        .ioctl          = dasd_ioctl,
        .compat_ioctl   = dasd_ioctl,
        .getgeo         = dasd_getgeo,
};

/*******************************************************************************
 * end of block device operations
 */

static void
dasd_exit(void)
{
#ifdef CONFIG_PROC_FS
        dasd_proc_exit();
#endif
        dasd_eer_exit();
        if (dasd_page_cache != NULL) {
                kmem_cache_destroy(dasd_page_cache);
                dasd_page_cache = NULL;
        }
        dasd_gendisk_exit();
        dasd_devmap_exit();
        if (dasd_debug_area != NULL) {
                debug_unregister(dasd_debug_area);
                dasd_debug_area = NULL;
        }
}

/*
 * SECTION: common functions for ccw_driver use
 */

static void dasd_generic_auto_online(void *data, async_cookie_t cookie)
{
        struct ccw_device *cdev = data;
        int ret;

        ret = ccw_device_set_online(cdev);
        if (ret)
                pr_warning("%s: Setting the DASD online failed with rc=%d\n",
                           dev_name(&cdev->dev), ret);
        else {
                struct dasd_device *device = dasd_device_from_cdev(cdev);
                wait_event(dasd_init_waitq, _wait_for_device(device));
                dasd_put_device(device);
        }
}
/*
 * Initial attempt at a probe function. This can be simplified once
 * the other detection code is gone.
 */
int dasd_generic_probe(struct ccw_device *cdev,
                       struct dasd_discipline *discipline)
{
        int ret;

        ret = ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP);
        if (ret) {
                DBF_EVENT(DBF_WARNING,
                          "dasd_generic_probe: could not set ccw-device options "
                          "for %s\n", dev_name(&cdev->dev));
                return ret;
        }
        ret = dasd_add_sysfs_files(cdev);
        if (ret) {
                DBF_EVENT(DBF_WARNING,
                          "dasd_generic_probe: could not add sysfs entries "
                          "for %s\n", dev_name(&cdev->dev));
                return ret;
        }
        cdev->handler = &dasd_int_handler;

        /*
         * Automatically online either all dasd devices (dasd_autodetect)
         * or all devices specified with dasd= parameters during
         * initial probe.
         */
        if ((dasd_get_feature(cdev, DASD_FEATURE_INITIAL_ONLINE) > 0) ||
            (dasd_autodetect && dasd_busid_known(dev_name(&cdev->dev)) != 0))
                async_schedule(dasd_generic_auto_online, cdev);
        return 0;
}
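
/*
 * Illustrative sketch only (names hypothetical): a discipline driver
 * typically wraps this helper in its own ccw probe callback, e.g.:
 *
 *      static int dasd_xxx_probe(struct ccw_device *cdev)
 *      {
 *              return dasd_generic_probe(cdev, &dasd_xxx_discipline);
 *      }
 */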
/*
 * This will one day be called from a global not_oper handler.
 * It is also used by driver_unregister during module unload.
 */
void dasd_generic_remove(struct ccw_device *cdev)
{
        struct dasd_device *device;
        struct dasd_block *block;

        cdev->handler = NULL;

        dasd_remove_sysfs_files(cdev);
        device = dasd_device_from_cdev(cdev);
        if (IS_ERR(device))
                return;
        if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
                /* Already doing offline processing */
                dasd_put_device(device);
                return;
        }
        /*
         * This device is removed unconditionally. Set offline
         * flag to prevent dasd_open from opening it while it is
         * not quite down yet.
         */
        dasd_set_target_state(device, DASD_STATE_NEW);
        /* dasd_delete_device destroys the device reference. */
        block = device->block;
        device->block = NULL;
        dasd_delete_device(device);
        /*
         * life cycle of block is bound to device, so delete it after
         * device was safely removed
         */
        if (block)
                dasd_free_block(block);
}
/*
 * Activate a device. This is called from dasd_{eckd,fba}_probe() when either
 * the device is detected for the first time and is supposed to be used
 * or the user has started activation through sysfs.
 */
int dasd_generic_set_online(struct ccw_device *cdev,
                            struct dasd_discipline *base_discipline)
{
        struct dasd_discipline *discipline;
        struct dasd_device *device;
        int rc;

        /* first online clears initial online feature flag */
        dasd_set_feature(cdev, DASD_FEATURE_INITIAL_ONLINE, 0);
        device = dasd_create_device(cdev);
        if (IS_ERR(device))
                return PTR_ERR(device);

        discipline = base_discipline;
        if (device->features & DASD_FEATURE_USEDIAG) {
                if (!dasd_diag_discipline_pointer) {
                        pr_warning("%s Setting the DASD online failed because "
                                   "of missing DIAG discipline\n",
                                   dev_name(&cdev->dev));
                        dasd_delete_device(device);
                        return -ENODEV;
                }
                discipline = dasd_diag_discipline_pointer;
        }
        if (!try_module_get(base_discipline->owner)) {
                dasd_delete_device(device);
                return -EINVAL;
        }
        if (!try_module_get(discipline->owner)) {
                module_put(base_discipline->owner);
                dasd_delete_device(device);
                return -EINVAL;
        }
        device->base_discipline = base_discipline;
        device->discipline = discipline;

        /* check_device will allocate block device if necessary */
        rc = discipline->check_device(device);
        if (rc) {
                pr_warning("%s Setting the DASD online with discipline %s "
                           "failed with rc=%i\n",
                           dev_name(&cdev->dev), discipline->name, rc);
                module_put(discipline->owner);
                module_put(base_discipline->owner);
                dasd_delete_device(device);
                return rc;
        }

        dasd_set_target_state(device, DASD_STATE_ONLINE);
        if (device->state <= DASD_STATE_KNOWN) {
                pr_warning("%s Setting the DASD online failed because of a "
                           "missing discipline\n", dev_name(&cdev->dev));
                rc = -ENODEV;
                dasd_set_target_state(device, DASD_STATE_NEW);
                if (device->block)
                        dasd_free_block(device->block);
                dasd_delete_device(device);
        } else
                pr_debug("dasd_generic device %s found\n",
                         dev_name(&cdev->dev));

        dasd_put_device(device);
        return rc;
}
int dasd_generic_set_offline(struct ccw_device *cdev)
{
        struct dasd_device *device;
        struct dasd_block *block;
        int max_count, open_count;

        device = dasd_device_from_cdev(cdev);
        if (IS_ERR(device))
                return PTR_ERR(device);
        if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
                /* Already doing offline processing */
                dasd_put_device(device);
                return 0;
        }
        /*
         * We must make sure that this device is currently not in use.
         * The open_count is increased for every opener, that includes
         * the blkdev_get in dasd_scan_partitions. We are only interested
         * in the other openers.
         */
        if (device->block) {
                max_count = device->block->bdev ? 0 : -1;
                open_count = atomic_read(&device->block->open_count);
                if (open_count > max_count) {
                        if (open_count > 0)
                                pr_warning("%s: The DASD cannot be set offline "
                                           "with open count %i\n",
                                           dev_name(&cdev->dev), open_count);
                        else
                                pr_warning("%s: The DASD cannot be set offline "
                                           "while it is in use\n",
                                           dev_name(&cdev->dev));
                        clear_bit(DASD_FLAG_OFFLINE, &device->flags);
                        dasd_put_device(device);
                        return -EBUSY;
                }
        }
        dasd_set_target_state(device, DASD_STATE_NEW);
        /* dasd_delete_device destroys the device reference. */
        block = device->block;
        device->block = NULL;
        dasd_delete_device(device);
        /*
         * life cycle of block is bound to device, so delete it after
         * device was safely removed
         */
        if (block)
                dasd_free_block(block);
        return 0;
}

int dasd_generic_notify(struct ccw_device *cdev, int event)
{
        struct dasd_device *device;
        struct dasd_ccw_req *cqr;
        int ret;

        device = dasd_device_from_cdev_locked(cdev);
        if (IS_ERR(device))
                return 0;
        ret = 0;
        switch (event) {
        case CIO_GONE:
        case CIO_BOXED:
        case CIO_NO_PATH:
                /* First of all call extended error reporting. */
                dasd_eer_write(device, NULL, DASD_EER_NOPATH);

                if (device->state < DASD_STATE_BASIC)
                        break;
                /* Device is active. We want to keep it. */
                list_for_each_entry(cqr, &device->ccw_queue, devlist)
                        if (cqr->status == DASD_CQR_IN_IO) {
                                cqr->status = DASD_CQR_QUEUED;
                                cqr->retries++;
                        }
                device->stopped |= DASD_STOPPED_DC_WAIT;
                dasd_device_clear_timer(device);
                dasd_schedule_device_bh(device);
                ret = 1;
                break;
        case CIO_OPER:
                /* FIXME: add a sanity check. */
                device->stopped &= ~DASD_STOPPED_DC_WAIT;
                dasd_schedule_device_bh(device);
                if (device->block)
                        dasd_schedule_block_bh(device->block);
                ret = 1;
                break;
        }
        dasd_put_device(device);
        return ret;
}
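
/*
 * Illustrative sketch only (names hypothetical): a discipline driver
 * usually wires the generic helpers above directly into its ccw_driver:
 *
 *      static struct ccw_driver dasd_xxx_driver = {
 *              ...
 *              .probe       = dasd_xxx_probe,       // wraps dasd_generic_probe()
 *              .remove      = dasd_generic_remove,
 *              .set_online  = dasd_xxx_set_online,  // wraps dasd_generic_set_online()
 *              .set_offline = dasd_generic_set_offline,
 *              .notify      = dasd_generic_notify,
 *      };
 */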
static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
                                                   void *rdc_buffer,
                                                   int rdc_buffer_size,
                                                   char *magic)
{
        struct dasd_ccw_req *cqr;
        struct ccw1 *ccw;

        cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device);

        if (IS_ERR(cqr)) {
                /* internal error 13 - Allocating the RDC request failed */
                dev_err(&device->cdev->dev,
                        "An error occurred in the DASD device driver, "
                        "reason=%s\n", "13");
                return cqr;
        }

        ccw = cqr->cpaddr;
        ccw->cmd_code = CCW_CMD_RDC;
        ccw->cda = (__u32)(addr_t)rdc_buffer;
        ccw->count = rdc_buffer_size;

        cqr->startdev = device;
        cqr->memdev = device;
        cqr->expires = 10 * HZ;
        clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
        cqr->retries = 2;
        cqr->buildclk = get_clock();
        cqr->status = DASD_CQR_FILLED;
        return cqr;
}

int dasd_generic_read_dev_chars(struct dasd_device *device, char *magic,
                                void *rdc_buffer, int rdc_buffer_size)
{
        int ret;
        struct dasd_ccw_req *cqr;

        cqr = dasd_generic_build_rdc(device, rdc_buffer, rdc_buffer_size,
                                     magic);
        if (IS_ERR(cqr))
                return PTR_ERR(cqr);
        ret = dasd_sleep_on(cqr);
        dasd_sfree_request(cqr, cqr->memdev);
        return ret;
}
EXPORT_SYMBOL_GPL(dasd_generic_read_dev_chars);
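
/*
 * Usage sketch (illustrative only; the magic string, buffer field and
 * size are assumptions that depend on the discipline): read the device
 * characteristics into a per-device buffer during device validation:
 *
 *      rc = dasd_generic_read_dev_chars(device, "ECKD",
 *                                       &private->rdc_data,
 *                                       sizeof(private->rdc_data));
 *      if (rc)
 *              return rc;
 */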
/*
 * In command mode and transport mode we need to look for sense
 * data in different places. The sense data itself is always
 * an array of 32 bytes, so we can unify the sense data access
 * for both modes.
 */
char *dasd_get_sense(struct irb *irb)
{
        struct tsb *tsb = NULL;
        char *sense = NULL;

        if (scsw_is_tm(&irb->scsw) && (irb->scsw.tm.fcxs == 0x01)) {
                if (irb->scsw.tm.tcw)
                        tsb = tcw_get_tsb((struct tcw *)(unsigned long)
                                          irb->scsw.tm.tcw);
                if (tsb && tsb->length == 64 && tsb->flags)
                        switch (tsb->flags & 0x07) {
                        case 1: /* tsa_iostat */
                                sense = tsb->tsa.iostat.sense;
                                break;
                        case 2: /* tsa_ddpc */
                                sense = tsb->tsa.ddpc.sense;
                                break;
                        default:
                                /* currently we don't use interrogate data */
                                break;
                        }
        } else if (irb->esw.esw0.erw.cons) {
                sense = irb->ecw;
        }
        return sense;
}
EXPORT_SYMBOL_GPL(dasd_get_sense);
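
/*
 * Usage sketch (illustrative only): interrupt handlers and error paths
 * check for available sense data before inspecting individual bytes:
 *
 *      char *sense = dasd_get_sense(irb);
 *
 *      if (!sense)
 *              return;         // no sense data available
 *      // inspect the 32 sense bytes, e.g. sense[0], sense[7], ...
 */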
static int __init dasd_init(void)
{
        int rc;

        init_waitqueue_head(&dasd_init_waitq);
        init_waitqueue_head(&dasd_flush_wq);
        init_waitqueue_head(&generic_waitq);

        /* register 'common' DASD debug area, used for all DBF_XXX calls */
        dasd_debug_area = debug_register("dasd", 1, 1, 8 * sizeof(long));
        if (dasd_debug_area == NULL) {
                rc = -ENOMEM;
                goto failed;
        }
        debug_register_view(dasd_debug_area, &debug_sprintf_view);
        debug_set_level(dasd_debug_area, DBF_WARNING);

        DBF_EVENT(DBF_EMERG, "%s", "debug area created");

        dasd_diag_discipline_pointer = NULL;

        rc = dasd_devmap_init();
        if (rc)
                goto failed;
        rc = dasd_gendisk_init();
        if (rc)
                goto failed;
        rc = dasd_parse();
        if (rc)
                goto failed;
        rc = dasd_eer_init();
        if (rc)
                goto failed;
#ifdef CONFIG_PROC_FS
        rc = dasd_proc_init();
        if (rc)
                goto failed;
#endif

        return 0;
failed:
        pr_info("The DASD device driver could not be initialized\n");
        dasd_exit();
        return rc;
}

module_init(dasd_init);
module_exit(dasd_exit);

EXPORT_SYMBOL(dasd_debug_area);
EXPORT_SYMBOL(dasd_diag_discipline_pointer);
EXPORT_SYMBOL(dasd_add_request_head);
EXPORT_SYMBOL(dasd_add_request_tail);
EXPORT_SYMBOL(dasd_cancel_req);
EXPORT_SYMBOL(dasd_device_clear_timer);
EXPORT_SYMBOL(dasd_block_clear_timer);
EXPORT_SYMBOL(dasd_enable_device);
EXPORT_SYMBOL(dasd_int_handler);
EXPORT_SYMBOL(dasd_kfree_request);
EXPORT_SYMBOL(dasd_kick_device);
EXPORT_SYMBOL(dasd_kmalloc_request);
EXPORT_SYMBOL(dasd_schedule_device_bh);
EXPORT_SYMBOL(dasd_schedule_block_bh);
EXPORT_SYMBOL(dasd_set_target_state);
EXPORT_SYMBOL(dasd_device_set_timer);
EXPORT_SYMBOL(dasd_block_set_timer);
EXPORT_SYMBOL(dasd_sfree_request);
EXPORT_SYMBOL(dasd_sleep_on);
EXPORT_SYMBOL(dasd_sleep_on_immediatly);
EXPORT_SYMBOL(dasd_sleep_on_interruptible);
EXPORT_SYMBOL(dasd_smalloc_request);
EXPORT_SYMBOL(dasd_start_IO);
EXPORT_SYMBOL(dasd_term_IO);
EXPORT_SYMBOL_GPL(dasd_generic_probe);
EXPORT_SYMBOL_GPL(dasd_generic_remove);
EXPORT_SYMBOL_GPL(dasd_generic_notify);
EXPORT_SYMBOL_GPL(dasd_generic_set_online);
EXPORT_SYMBOL_GPL(dasd_generic_set_offline);
EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change);
EXPORT_SYMBOL_GPL(dasd_flush_device_queue);
EXPORT_SYMBOL_GPL(dasd_alloc_block);
EXPORT_SYMBOL_GPL(dasd_free_block);