/*
 * zfcp device driver
 *
 * Setup and helper functions to access QDIO.
 *
 * Copyright IBM Corporation 2002, 2008
 */

#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include "zfcp_ext.h"

#define QBUFF_PER_PAGE	(PAGE_SIZE / sizeof(struct qdio_buffer))
2005-04-17 02:20:36 +04:00
2008-06-10 20:20:57 +04:00
static int zfcp_qdio_buffers_enqueue ( struct qdio_buffer * * sbal )
2005-04-17 02:20:36 +04:00
{
2007-07-18 12:55:13 +04:00
int pos ;
2005-04-17 02:20:36 +04:00
2007-07-18 12:55:13 +04:00
for ( pos = 0 ; pos < QDIO_MAX_BUFFERS_PER_Q ; pos + = QBUFF_PER_PAGE ) {
2008-06-10 20:20:57 +04:00
sbal [ pos ] = ( struct qdio_buffer * ) get_zeroed_page ( GFP_KERNEL ) ;
if ( ! sbal [ pos ] )
2007-07-18 12:55:13 +04:00
return - ENOMEM ;
}
for ( pos = 0 ; pos < QDIO_MAX_BUFFERS_PER_Q ; pos + + )
if ( pos % QBUFF_PER_PAGE )
2008-06-10 20:20:57 +04:00
sbal [ pos ] = sbal [ pos - 1 ] + 1 ;
2007-07-18 12:55:13 +04:00
return 0 ;
2005-04-17 02:20:36 +04:00
}
2008-10-01 14:42:16 +04:00
static struct qdio_buffer_element *
2008-06-10 20:20:57 +04:00
zfcp_qdio_sbale ( struct zfcp_qdio_queue * q , int sbal_idx , int sbale_idx )
2005-04-17 02:20:36 +04:00
{
2008-06-10 20:20:57 +04:00
return & q - > sbal [ sbal_idx ] - > element [ sbale_idx ] ;
2005-04-17 02:20:36 +04:00
}
2008-06-10 20:20:57 +04:00
/**
* zfcp_qdio_free - free memory used by request - and resposne queue
* @ adapter : pointer to the zfcp_adapter structure
*/
void zfcp_qdio_free ( struct zfcp_adapter * adapter )
2005-04-17 02:20:36 +04:00
{
2008-06-10 20:20:57 +04:00
struct qdio_buffer * * sbal_req , * * sbal_resp ;
int p ;
2005-04-17 02:20:36 +04:00
2008-06-10 20:20:57 +04:00
if ( adapter - > ccw_device )
qdio_free ( adapter - > ccw_device ) ;
2005-04-17 02:20:36 +04:00
2008-06-10 20:20:57 +04:00
sbal_req = adapter - > req_q . sbal ;
sbal_resp = adapter - > resp_q . sbal ;
2005-04-17 02:20:36 +04:00
2008-06-10 20:20:57 +04:00
for ( p = 0 ; p < QDIO_MAX_BUFFERS_PER_Q ; p + = QBUFF_PER_PAGE ) {
free_page ( ( unsigned long ) sbal_req [ p ] ) ;
free_page ( ( unsigned long ) sbal_resp [ p ] ) ;
}
2005-04-17 02:20:36 +04:00
}
2009-03-02 15:09:04 +03:00
static void zfcp_qdio_handler_error ( struct zfcp_adapter * adapter , char * id )
2005-04-17 02:20:36 +04:00
{
2008-10-01 14:42:15 +04:00
dev_warn ( & adapter - > ccw_device - > dev , " A QDIO problem occurred \n " ) ;
2005-04-17 02:20:36 +04:00
2008-06-10 20:20:57 +04:00
zfcp_erp_adapter_reopen ( adapter ,
ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED |
ZFCP_STATUS_COMMON_ERP_FAILED , id , NULL ) ;
2005-04-17 02:20:36 +04:00
}
2008-07-02 12:56:34 +04:00
static void zfcp_qdio_zero_sbals ( struct qdio_buffer * sbal [ ] , int first , int cnt )
{
int i , sbal_idx ;
for ( i = first ; i < first + cnt ; i + + ) {
sbal_idx = i % QDIO_MAX_BUFFERS_PER_Q ;
memset ( sbal [ sbal_idx ] , 0 , sizeof ( struct qdio_buffer ) ) ;
}
}
2009-03-02 15:08:56 +03:00
/* this needs to be called prior to updating the queue fill level */
static void zfcp_qdio_account ( struct zfcp_adapter * adapter )
{
ktime_t now ;
s64 span ;
int free , used ;
spin_lock ( & adapter - > qdio_stat_lock ) ;
now = ktime_get ( ) ;
span = ktime_us_delta ( now , adapter - > req_q_time ) ;
free = max ( 0 , atomic_read ( & adapter - > req_q . count ) ) ;
used = QDIO_MAX_BUFFERS_PER_Q - free ;
adapter - > req_q_util + = used * span ;
adapter - > req_q_time = now ;
spin_unlock ( & adapter - > qdio_stat_lock ) ;
}
2008-07-17 19:16:48 +04:00
static void zfcp_qdio_int_req ( struct ccw_device * cdev , unsigned int qdio_err ,
int queue_no , int first , int count ,
2008-06-10 20:20:57 +04:00
unsigned long parm )
2005-04-17 02:20:36 +04:00
{
2008-06-10 20:20:57 +04:00
struct zfcp_adapter * adapter = ( struct zfcp_adapter * ) parm ;
struct zfcp_qdio_queue * queue = & adapter - > req_q ;
2005-04-17 02:20:36 +04:00
2008-07-17 19:16:48 +04:00
if ( unlikely ( qdio_err ) ) {
zfcp_hba_dbf_event_qdio ( adapter , qdio_err , first , count ) ;
2009-03-02 15:09:04 +03:00
zfcp_qdio_handler_error ( adapter , " qdireq1 " ) ;
2008-06-10 20:20:57 +04:00
return ;
}
2005-04-17 02:20:36 +04:00
/* cleanup all SBALs being program-owned now */
2008-06-10 20:20:57 +04:00
zfcp_qdio_zero_sbals ( queue - > sbal , first , count ) ;
2005-04-17 02:20:36 +04:00
2009-03-02 15:08:56 +03:00
zfcp_qdio_account ( adapter ) ;
2008-06-10 20:20:57 +04:00
atomic_add ( count , & queue - > count ) ;
2005-04-17 02:20:36 +04:00
wake_up ( & adapter - > request_wq ) ;
}
/*
 * Look up the pending fsf_req for @req_id, detach it from the adapter's
 * request list and complete it, recording which response SBAL it arrived in.
 */
static void zfcp_qdio_reqid_check(struct zfcp_adapter *adapter,
				  unsigned long req_id, int sbal_idx)
{
	struct zfcp_fsf_req *fsf_req;
	unsigned long flags;

	/* protect the request list against concurrent modification */
	spin_lock_irqsave(&adapter->req_list_lock, flags);
	fsf_req = zfcp_reqlist_find(adapter, req_id);

	if (!fsf_req)
		/*
		 * Unknown request means that we have potentially memory
		 * corruption and must stop the machine immediately.
		 */
		panic("error: unknown request id (%lx) on adapter %s.\n",
		      req_id, dev_name(&adapter->ccw_device->dev));

	zfcp_reqlist_remove(adapter, fsf_req);
	spin_unlock_irqrestore(&adapter->req_list_lock, flags);

	/* record where the response arrived and the inbound fill level */
	fsf_req->sbal_response = sbal_idx;
	fsf_req->qdio_inb_usage = atomic_read(&adapter->resp_q.count);
	zfcp_fsf_req_complete(fsf_req);
}
/*
 * Hand @processed response buffers (plus any buffers accumulated from
 * earlier failed attempts) back to the inbound queue for reuse.
 */
static void zfcp_qdio_resp_put_back(struct zfcp_adapter *adapter, int processed)
{
	struct zfcp_qdio_queue *queue = &adapter->resp_q;
	struct ccw_device *cdev = adapter->ccw_device;
	u8 count, start = queue->first;
	unsigned int retval;

	count = atomic_read(&queue->count) + processed;

	retval = do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, start, count);

	if (unlikely(retval)) {
		/* keep the buffers accounted for; they are retried on the
		 * next call, which re-reads queue->count */
		atomic_set(&queue->count, count);
		/* FIXME: Recover this with an adapter reopen? */
	} else {
		/* advance the queue head past the returned buffers */
		queue->first += count;
		queue->first %= QDIO_MAX_BUFFERS_PER_Q;
		atomic_set(&queue->count, 0);
	}
}
/* interrupt handler for the inbound (response) queue; registered as
 * input_handler in zfcp_qdio_allocate() */
static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
			       int queue_no, int first, int count,
			       unsigned long parm)
{
	struct zfcp_adapter *adapter = (struct zfcp_adapter *) parm;
	struct zfcp_qdio_queue *queue = &adapter->resp_q;
	struct qdio_buffer_element *sbale;
	int sbal_idx, sbale_idx, sbal_no;

	if (unlikely(qdio_err)) {
		zfcp_hba_dbf_event_qdio(adapter, qdio_err, first, count);
		zfcp_qdio_handler_error(adapter, "qdires1");
		return;
	}

	/*
	 * go through all SBALs from input queue currently
	 * returned by QDIO layer
	 */
	for (sbal_no = 0; sbal_no < count; sbal_no++) {
		sbal_idx = (first + sbal_no) % QDIO_MAX_BUFFERS_PER_Q;

		/* go through all SBALEs of SBAL */
		for (sbale_idx = 0; sbale_idx < QDIO_MAX_ELEMENTS_PER_BUFFER;
		     sbale_idx++) {
			sbale = zfcp_qdio_sbale(queue, sbal_idx, sbale_idx);
			/* the addr field carries the originating request id */
			zfcp_qdio_reqid_check(adapter,
					      (unsigned long) sbale->addr,
					      sbal_idx);
			if (likely(sbale->flags & SBAL_FLAGS_LAST_ENTRY))
				break;
		};

		/* sbale still points at the last element examined above; if
		 * the SBAL ended without a LAST_ENTRY flag, complain */
		if (unlikely(!(sbale->flags & SBAL_FLAGS_LAST_ENTRY)))
			dev_warn(&adapter->ccw_device->dev,
				 "A QDIO protocol error occurred, "
				 "operations continue\n");
	}

	/*
	 * put range of SBALs back to response queue
	 * (including SBALs which have already been free before)
	 */
	zfcp_qdio_resp_put_back(adapter, count);
}
/**
2008-06-10 20:20:57 +04:00
* zfcp_qdio_sbale_req - return ptr to SBALE of req_q for a struct zfcp_fsf_req
* @ fsf_req : pointer to struct fsf_req
* Returns : pointer to qdio_buffer_element ( SBALE ) structure
2005-04-17 02:20:36 +04:00
*/
2008-10-01 14:42:16 +04:00
struct qdio_buffer_element * zfcp_qdio_sbale_req ( struct zfcp_fsf_req * req )
2005-04-17 02:20:36 +04:00
{
2008-06-10 20:20:57 +04:00
return zfcp_qdio_sbale ( & req - > adapter - > req_q , req - > sbal_last , 0 ) ;
2005-04-17 02:20:36 +04:00
}
/**
2008-06-10 20:20:57 +04:00
* zfcp_qdio_sbale_curr - return curr SBALE on req_q for a struct zfcp_fsf_req
* @ fsf_req : pointer to struct fsf_req
* Returns : pointer to qdio_buffer_element ( SBALE ) structure
2005-04-17 02:20:36 +04:00
*/
2008-10-01 14:42:16 +04:00
struct qdio_buffer_element * zfcp_qdio_sbale_curr ( struct zfcp_fsf_req * req )
2005-04-17 02:20:36 +04:00
{
2008-06-10 20:20:57 +04:00
return zfcp_qdio_sbale ( & req - > adapter - > req_q , req - > sbal_last ,
req - > sbale_curr ) ;
2005-04-17 02:20:36 +04:00
}
2008-06-10 20:20:57 +04:00
static void zfcp_qdio_sbal_limit ( struct zfcp_fsf_req * fsf_req , int max_sbals )
2005-04-17 02:20:36 +04:00
{
2008-06-10 20:20:57 +04:00
int count = atomic_read ( & fsf_req - > adapter - > req_q . count ) ;
2005-04-17 02:20:36 +04:00
count = min ( count , max_sbals ) ;
2008-06-10 20:20:57 +04:00
fsf_req - > sbal_limit = ( fsf_req - > sbal_first + count - 1 )
% QDIO_MAX_BUFFERS_PER_Q ;
2005-04-17 02:20:36 +04:00
}
/*
 * Close the current SBAL and chain the request on to the next one.
 * Returns the first SBALE of the new SBAL, or NULL when the request has
 * already reached its SBAL limit (nothing is modified past the limit check
 * except the LAST_ENTRY flag set below).
 */
static struct qdio_buffer_element *
zfcp_qdio_sbal_chain(struct zfcp_fsf_req *fsf_req, unsigned long sbtype)
{
	struct qdio_buffer_element *sbale;

	/* set last entry flag in current SBALE of current SBAL */
	sbale = zfcp_qdio_sbale_curr(fsf_req);
	sbale->flags |= SBAL_FLAGS_LAST_ENTRY;

	/* don't exceed last allowed SBAL */
	if (fsf_req->sbal_last == fsf_req->sbal_limit)
		return NULL;

	/* set chaining flag in first SBALE of current SBAL */
	sbale = zfcp_qdio_sbale_req(fsf_req);
	sbale->flags |= SBAL_FLAGS0_MORE_SBALS;

	/* calculate index of next SBAL */
	fsf_req->sbal_last++;
	fsf_req->sbal_last %= QDIO_MAX_BUFFERS_PER_Q;

	/* keep this requests number of SBALs up-to-date */
	fsf_req->sbal_number++;

	/* start at first SBALE of new SBAL */
	fsf_req->sbale_curr = 0;

	/* set storage-block type for new SBAL */
	sbale = zfcp_qdio_sbale_curr(fsf_req);
	sbale->flags |= sbtype;

	return sbale;
}
2008-10-01 14:42:16 +04:00
static struct qdio_buffer_element *
2005-04-17 02:20:36 +04:00
zfcp_qdio_sbale_next ( struct zfcp_fsf_req * fsf_req , unsigned long sbtype )
{
if ( fsf_req - > sbale_curr = = ZFCP_LAST_SBALE_PER_SBAL )
return zfcp_qdio_sbal_chain ( fsf_req , sbtype ) ;
fsf_req - > sbale_curr + + ;
return zfcp_qdio_sbale_curr ( fsf_req ) ;
}
2008-06-10 20:20:57 +04:00
static void zfcp_qdio_undo_sbals ( struct zfcp_fsf_req * fsf_req )
2005-04-17 02:20:36 +04:00
{
2008-06-10 20:20:57 +04:00
struct qdio_buffer * * sbal = fsf_req - > adapter - > req_q . sbal ;
int first = fsf_req - > sbal_first ;
int last = fsf_req - > sbal_last ;
int count = ( last - first + QDIO_MAX_BUFFERS_PER_Q ) %
QDIO_MAX_BUFFERS_PER_Q + 1 ;
zfcp_qdio_zero_sbals ( sbal , first , count ) ;
2005-04-17 02:20:36 +04:00
}
/*
 * Map the region [@start_addr, @start_addr + @total_length) into SBALEs of
 * the request, splitting each piece at the next page boundary and chaining
 * further SBALs as needed.  Returns 0 on success; -EINVAL when the request
 * runs out of SBALs, in which case all SBALs used so far are zeroed again.
 */
static int zfcp_qdio_fill_sbals(struct zfcp_fsf_req *fsf_req,
				unsigned int sbtype, void *start_addr,
				unsigned int total_length)
{
	struct qdio_buffer_element *sbale;
	unsigned long remaining, length;
	void *addr;

	/* split segment up */
	for (addr = start_addr, remaining = total_length; remaining > 0;
	     addr += length, remaining -= length) {
		sbale = zfcp_qdio_sbale_next(fsf_req, sbtype);
		if (!sbale) {
			/* out of SBALs: count the overflow and roll back */
			atomic_inc(&fsf_req->adapter->qdio_outb_full);
			zfcp_qdio_undo_sbals(fsf_req);
			return -EINVAL;
		}

		/* new piece must not exceed next page boundary */
		length = min(remaining,
			     (PAGE_SIZE - ((unsigned long) addr &
					   (PAGE_SIZE - 1))));
		sbale->addr = addr;
		sbale->length = length;
	}
	return 0;
}
/**
 * zfcp_qdio_sbals_from_sg - fill SBALs from scatter-gather list
 * @fsf_req: request to be processed
 * @sbtype: SBALE flags
 * @sg: scatter-gather list
 * @max_sbals: upper bound for number of SBALs to be used
 * Returns: number of bytes, or error (negative)
 */
int zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
			    struct scatterlist *sg, int max_sbals)
{
	struct qdio_buffer_element *sbale;
	int retval, bytes = 0;

	/* figure out last allowed SBAL */
	zfcp_qdio_sbal_limit(fsf_req, max_sbals);

	/* set storage-block type for this request */
	sbale = zfcp_qdio_sbale_req(fsf_req);
	sbale->flags |= sbtype;

	/* map each scatter-gather segment; errors propagate unchanged */
	for (; sg; sg = sg_next(sg)) {
		retval = zfcp_qdio_fill_sbals(fsf_req, sbtype, sg_virt(sg),
					      sg->length);
		if (retval < 0)
			return retval;
		bytes += sg->length;
	}

	/* assume that no other SBALEs are to follow in the same SBAL */
	sbale = zfcp_qdio_sbale_curr(fsf_req);
	sbale->flags |= SBAL_FLAGS_LAST_ENTRY;

	return bytes;
}
/**
2008-06-10 20:20:57 +04:00
* zfcp_qdio_send - set PCI flag in first SBALE and send req to QDIO
* @ fsf_req : pointer to struct zfcp_fsf_req
* Returns : 0 on success , error otherwise
2005-04-17 02:20:36 +04:00
*/
2008-06-10 20:20:57 +04:00
int zfcp_qdio_send ( struct zfcp_fsf_req * fsf_req )
2005-04-17 02:20:36 +04:00
{
2008-06-10 20:20:57 +04:00
struct zfcp_adapter * adapter = fsf_req - > adapter ;
struct zfcp_qdio_queue * req_q = & adapter - > req_q ;
int first = fsf_req - > sbal_first ;
int count = fsf_req - > sbal_number ;
2009-03-02 15:09:05 +03:00
int retval ;
unsigned int qdio_flags = QDIO_FLAG_SYNC_OUTPUT ;
2008-06-10 20:20:57 +04:00
2009-03-02 15:08:56 +03:00
zfcp_qdio_account ( adapter ) ;
2009-03-02 15:09:05 +03:00
retval = do_QDIO ( adapter - > ccw_device , qdio_flags , 0 , first , count ) ;
2008-06-10 20:20:57 +04:00
if ( unlikely ( retval ) ) {
zfcp_qdio_zero_sbals ( req_q - > sbal , first , count ) ;
return retval ;
}
/* account for transferred buffers */
atomic_sub ( count , & req_q - > count ) ;
req_q - > first + = count ;
req_q - > first % = QDIO_MAX_BUFFERS_PER_Q ;
return 0 ;
2005-04-17 02:20:36 +04:00
}
/**
 * zfcp_qdio_allocate - allocate queue memory and initialize QDIO data
 * @adapter: pointer to struct zfcp_adapter
 * Returns: -ENOMEM on memory allocation error or return value from
 *          qdio_allocate
 */
int zfcp_qdio_allocate(struct zfcp_adapter *adapter)
{
	struct qdio_initialize *init_data;

	/* back both SBAL arrays with pages before touching init data */
	if (zfcp_qdio_buffers_enqueue(adapter->req_q.sbal) ||
	    zfcp_qdio_buffers_enqueue(adapter->resp_q.sbal))
		return -ENOMEM;

	init_data = &adapter->qdio_init_data;

	init_data->cdev = adapter->ccw_device;
	init_data->q_format = QDIO_ZFCP_QFMT;
	memcpy(init_data->adapter_name, dev_name(&adapter->ccw_device->dev), 8);
	ASCEBC(init_data->adapter_name, 8);
	init_data->qib_param_field_format = 0;
	init_data->qib_param_field = NULL;
	init_data->input_slib_elements = NULL;
	init_data->output_slib_elements = NULL;
	/* one inbound (response) and one outbound (request) queue */
	init_data->no_input_qs = 1;
	init_data->no_output_qs = 1;
	init_data->input_handler = zfcp_qdio_int_resp;
	init_data->output_handler = zfcp_qdio_int_req;
	init_data->int_parm = (unsigned long) adapter;
	init_data->flags = QDIO_INBOUND_0COPY_SBALS |
			QDIO_OUTBOUND_0COPY_SBALS | QDIO_USE_OUTBOUND_PCIS;
	init_data->input_sbal_addr_array =
			(void **) (adapter->resp_q.sbal);
	init_data->output_sbal_addr_array =
			(void **) (adapter->req_q.sbal);

	return qdio_allocate(init_data);
}
/**
 * zfcp_qdio_close - close qdio queues for an adapter
 * @adapter: pointer to struct zfcp_adapter
 */
void zfcp_qdio_close(struct zfcp_adapter *adapter)
{
	struct zfcp_qdio_queue *req_q;
	int first, count;

	/* nothing to do if the queues were never brought up */
	if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
		return;

	/* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */
	req_q = &adapter->req_q;
	spin_lock_bh(&adapter->req_q_lock);
	atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
	spin_unlock_bh(&adapter->req_q_lock);

	qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR);

	/* cleanup used outbound sbals */
	count = atomic_read(&req_q->count);
	if (count < QDIO_MAX_BUFFERS_PER_Q) {
		first = (req_q->first + count) % QDIO_MAX_BUFFERS_PER_Q;
		count = QDIO_MAX_BUFFERS_PER_Q - count;
		zfcp_qdio_zero_sbals(req_q->sbal, first, count);
	}

	/* reset both queues to their initial empty state */
	req_q->first = 0;
	atomic_set(&req_q->count, 0);
	adapter->resp_q.first = 0;
	atomic_set(&adapter->resp_q.count, 0);
}
/**
 * zfcp_qdio_open - prepare and initialize response queue
 * @adapter: pointer to struct zfcp_adapter
 * Returns: 0 on success, otherwise -EIO
 */
int zfcp_qdio_open(struct zfcp_adapter *adapter)
{
	struct qdio_buffer_element *sbale;
	int cc;

	/* refuse a second open while the queues are already up */
	if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)
		return -EIO;

	if (qdio_establish(&adapter->qdio_init_data))
		goto failed_establish;

	if (qdio_activate(adapter->ccw_device))
		goto failed_qdio;

	/* initialize each response buffer as a single empty entry */
	for (cc = 0; cc < QDIO_MAX_BUFFERS_PER_Q; cc++) {
		sbale = &(adapter->resp_q.sbal[cc]->element[0]);
		sbale->length = 0;
		sbale->flags = SBAL_FLAGS_LAST_ENTRY;
		sbale->addr = NULL;
	}

	/* hand all response buffers to the adapter */
	if (do_QDIO(adapter->ccw_device, QDIO_FLAG_SYNC_INPUT, 0, 0,
		    QDIO_MAX_BUFFERS_PER_Q))
		goto failed_qdio;

	/* set index of first available SBALS / number of available SBALS */
	adapter->req_q.first = 0;
	atomic_set(&adapter->req_q.count, QDIO_MAX_BUFFERS_PER_Q);

	return 0;

failed_qdio:
	qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR);
failed_establish:
	dev_err(&adapter->ccw_device->dev,
		"Setting up the QDIO connection to the FCP adapter failed\n");
	return -EIO;
}