/*
 * linux/drivers/s390/cio/qdio_main.c
 *
 * Linux for s390 qdio support, buffer handling, qdio API and module support.
 *
 * Copyright 2000,2008 IBM Corp.
 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
 *	      Jan Glauber <jang@linux.vnet.ibm.com>
 * 2.6 cio integration by Cornelia Huck <cornelia.huck@de.ibm.com>
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <asm/atomic.h>
#include <asm/debug.h>
#include <asm/qdio.h>

#include "cio.h"
#include "css.h"
#include "device.h"
#include "qdio.h"
#include "qdio_debug.h"
#include "qdio_perf.h"

MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>,"\
	"Jan Glauber <jang@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("QDIO base support");
MODULE_LICENSE("GPL");
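
/* issue SIGA with function code 2 (synchronize) for the given queue masks */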
static inline int do_siga_sync(struct subchannel_id schid,
			       unsigned int out_mask, unsigned int in_mask)
{
	register unsigned long __fc asm ("0") = 2;
	register struct subchannel_id __schid asm ("1") = schid;
	register unsigned long out asm ("2") = out_mask;
	register unsigned long in asm ("3") = in_mask;
	int cc;

	asm volatile(
		"\tsiga\t0\n"
		"\tipm\t%0\n"
		"\tsrl\t%0,28\n"
		: "=d" (cc)
		: "d" (__fc), "d" (__schid), "d" (out), "d" (in) : "cc");
	return cc;
}
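
/* issue SIGA with function code 1 (initiate input) for the given queue mask */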
static inline int do_siga_input(struct subchannel_id schid, unsigned int mask)
{
	register unsigned long __fc asm ("0") = 1;
	register struct subchannel_id __schid asm ("1") = schid;
	register unsigned long __mask asm ("2") = mask;
	int cc;

	asm volatile(
		"\tsiga\t0\n"
		"\tipm\t%0\n"
		"\tsrl\t%0,28\n"
		: "=d" (cc)
		: "d" (__fc), "d" (__schid), "d" (__mask) : "cc", "memory");
	return cc;
}

/**
 * do_siga_output - perform SIGA-w/wt function
 * @schid: subchannel id or in case of QEBSM the subchannel token
 * @mask: which output queues to process
 * @bb: busy bit indicator, set only if SIGA-w/wt could not access a buffer
 * @fc: function code to perform
 *
 * Returns cc or QDIO_ERROR_SIGA_ACCESS_EXCEPTION.
 * Note: For IQDC unicast queues only the highest priority queue is processed.
 */
static inline int do_siga_output(unsigned long schid, unsigned long mask,
				 unsigned int *bb, unsigned int fc)
{
	register unsigned long __fc asm("0") = fc;
	register unsigned long __schid asm("1") = schid;
	register unsigned long __mask asm("2") = mask;
	int cc = QDIO_ERROR_SIGA_ACCESS_EXCEPTION;

	asm volatile(
		"\tsiga\t0\n"
		"0:\tipm\t%0\n"
		"\tsrl\t%0,28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: "+d" (cc), "+d" (__fc), "+d" (__schid), "+d" (__mask)
		: : "cc", "memory");
	*bb = ((unsigned int) __fc) >> 31;
	return cc;
}
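
/*
 * Evaluate the condition code qualifier returned by EQBS/SQBS:
 * 0 means done, 1 means retry, a negative value means a fatal error.
 */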
static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
{
	/* all done or next buffer state different */
	if (ccq == 0 || ccq == 32)
		return 0;
	/* not all buffers processed */
	if (ccq == 96 || ccq == 97)
		return 1;
	/* notify devices immediately */
	DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq);
	return -EIO;
}

/**
 * qdio_do_eqbs - extract buffer states for QEBSM
 * @q: queue to manipulate
 * @state: state of the extracted buffers
 * @start: buffer number to start at
 * @count: count of buffers to examine
 * @auto_ack: automatically acknowledge buffers
 *
 * Returns the number of successfully extracted equal buffer states.
 * Stops processing if a state is different from the last buffers state.
 */
static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
			int start, int count, int auto_ack)
{
	unsigned int ccq = 0;
	int tmp_count = count, tmp_start = start;
	int nr = q->nr;
	int rc;

	BUG_ON(!q->irq_ptr->sch_token);
	qdio_perf_stat_inc(&perf_stats.debug_eqbs_all);

	if (!q->is_input_q)
		nr += q->irq_ptr->nr_input_qs;
again:
	ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count,
		      auto_ack);
	rc = qdio_check_ccq(q, ccq);

	/* At least one buffer was processed, return and extract the remaining
	 * buffers later.
	 */
	if ((ccq == 96) && (count != tmp_count)) {
		qdio_perf_stat_inc(&perf_stats.debug_eqbs_incomplete);
		return (count - tmp_count);
	}

	if (rc == 1) {
		DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS again:%2d", ccq);
		goto again;
	}

	if (rc < 0) {
		DBF_ERROR("%4x EQBS ERROR", SCH_NO(q));
		DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
		q->handler(q->irq_ptr->cdev,
			   QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
			   0, -1, -1, q->irq_ptr->int_parm);
		return 0;
	}
	return count - tmp_count;
}

/**
 * qdio_do_sqbs - set buffer states for QEBSM
 * @q: queue to manipulate
 * @state: new state of the buffers
 * @start: first buffer number to change
 * @count: how many buffers to change
 *
 * Returns the number of successfully changed buffers.
 * Does retrying until the specified count of buffer states is set or an
 * error occurs.
 */
static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start,
			int count)
{
	unsigned int ccq = 0;
	int tmp_count = count, tmp_start = start;
	int nr = q->nr;
	int rc;

	if (!count)
		return 0;

	BUG_ON(!q->irq_ptr->sch_token);
	qdio_perf_stat_inc(&perf_stats.debug_sqbs_all);

	if (!q->is_input_q)
		nr += q->irq_ptr->nr_input_qs;
again:
	ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count);
	rc = qdio_check_ccq(q, ccq);
	if (rc == 1) {
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "SQBS again:%2d", ccq);
		qdio_perf_stat_inc(&perf_stats.debug_sqbs_incomplete);
		goto again;
	}
	if (rc < 0) {
		DBF_ERROR("%4x SQBS ERROR", SCH_NO(q));
		DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
		q->handler(q->irq_ptr->cdev,
			   QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
			   0, -1, -1, q->irq_ptr->int_parm);
		return 0;
	}
	WARN_ON(tmp_count);
	return count - tmp_count;
}

/* returns number of examined buffers and their common state in *state */
static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
				 unsigned char *state, unsigned int count,
				 int auto_ack)
{
	unsigned char __state = 0;
	int i;

	BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK);
	BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q);

	if (is_qebsm(q))
		return qdio_do_eqbs(q, state, bufnr, count, auto_ack);

	for (i = 0; i < count; i++) {
		if (!__state)
			__state = q->slsb.val[bufnr];
		else if (q->slsb.val[bufnr] != __state)
			break;
		bufnr = next_buf(bufnr);
	}
	*state = __state;
	return i;
}

inline int get_buf_state(struct qdio_q *q, unsigned int bufnr,
			 unsigned char *state, int auto_ack)
{
	return get_buf_states(q, bufnr, state, 1, auto_ack);
}

/* wrap-around safe setting of slsb states, returns number of changed buffers */
static inline int set_buf_states(struct qdio_q *q, int bufnr,
				 unsigned char state, int count)
{
	int i;

	BUG_ON(bufnr > QDIO_MAX_BUFFERS_MASK);
	BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q);

	if (is_qebsm(q))
		return qdio_do_sqbs(q, state, bufnr, count);

	for (i = 0; i < count; i++) {
		xchg(&q->slsb.val[bufnr], state);
		bufnr = next_buf(bufnr);
	}
	return count;
}

static inline int set_buf_state(struct qdio_q *q, int bufnr,
				unsigned char state)
{
	return set_buf_states(q, bufnr, state, 1);
}

/* set slsb states to initial state */
void qdio_init_buf_states(struct qdio_irq *irq_ptr)
{
	struct qdio_q *q;
	int i;

	for_each_input_queue(irq_ptr, q, i)
		set_buf_states(q, 0, SLSB_P_INPUT_NOT_INIT,
			       QDIO_MAX_BUFFERS_PER_Q);
	for_each_output_queue(irq_ptr, q, i)
		set_buf_states(q, 0, SLSB_P_OUTPUT_NOT_INIT,
			       QDIO_MAX_BUFFERS_PER_Q);
}
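
/* issue SIGA-S for the given output/input masks if this queue requires it */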
static int qdio_siga_sync(struct qdio_q *q, unsigned int output,
			  unsigned int input)
{
	int cc;

	if (!need_siga_sync(q))
		return 0;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-s:%1d", q->nr);
	qdio_perf_stat_inc(&perf_stats.siga_sync);

	cc = do_siga_sync(q->irq_ptr->schid, output, input);
	if (cc)
		DBF_ERROR("%4x SIGA-S:%2d", SCH_NO(q), cc);
	return cc;
}

inline int qdio_siga_sync_q(struct qdio_q *q)
{
	if (q->is_input_q)
		return qdio_siga_sync(q, 0, q->mask);
	else
		return qdio_siga_sync(q, q->mask, 0);
}

static inline int qdio_siga_sync_out(struct qdio_q *q)
{
	return qdio_siga_sync(q, ~0U, 0);
}

static inline int qdio_siga_sync_all(struct qdio_q *q)
{
	return qdio_siga_sync(q, ~0U, ~0U);
}
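
/*
 * Issue SIGA-w (or the enhanced variant) for the output queue. If the
 * adapter signals the busy bit, retry until the QDIO_BUSY_BIT_PATIENCE
 * interval since the first busy indication has elapsed.
 */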
static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit)
{
	unsigned long schid;
	unsigned int fc = 0;
	u64 start_time = 0;
	int cc;

	if (q->u.out.use_enh_siga)
		fc = 3;

	if (is_qebsm(q)) {
		schid = q->irq_ptr->sch_token;
		fc |= 0x80;
	}
	else
		schid = *((u32 *)&q->irq_ptr->schid);

again:
	cc = do_siga_output(schid, q->mask, busy_bit, fc);

	/* hipersocket busy condition */
	if (*busy_bit) {
		WARN_ON(queue_type(q) != QDIO_IQDIO_QFMT || cc != 2);

		if (!start_time) {
			start_time = get_usecs();
			goto again;
		}
		if ((get_usecs() - start_time) < QDIO_BUSY_BIT_PATIENCE)
			goto again;
	}
	return cc;
}

static inline int qdio_siga_input(struct qdio_q *q)
{
	int cc;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr);
	qdio_perf_stat_inc(&perf_stats.siga_in);

	cc = do_siga_input(q->irq_ptr->schid, q->mask);
	if (cc)
		DBF_ERROR("%4x SIGA-R:%2d", SCH_NO(q), cc);
	return cc;
}

/* called from thinint inbound handler */
void qdio_sync_after_thinint(struct qdio_q *q)
{
	if (pci_out_supported(q)) {
		if (need_siga_sync_thinint(q))
			qdio_siga_sync_all(q);
		else if (need_siga_sync_out_thinint(q))
			qdio_siga_sync_out(q);
	} else
		qdio_siga_sync_q(q);
}

inline void qdio_stop_polling(struct qdio_q *q)
{
	if (!q->u.in.polling)
		return;

	q->u.in.polling = 0;
	qdio_perf_stat_inc(&perf_stats.debug_stop_polling);

	/* show the card that we are not polling anymore */
	if (is_qebsm(q)) {
		set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
			       q->u.in.ack_count);
		q->u.in.ack_count = 0;
	} else
		set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
}
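
/*
 * Flag an SLSB state error in q->qdio_error and log the details. The
 * outbound "no target buffer empty" case is only counted in the perf
 * statistics and skips the DBF error logging.
 */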
static void announce_buffer_error(struct qdio_q *q, int count)
{
	q->qdio_error |= QDIO_ERROR_SLSB_STATE;

	/* special handling for no target buffer empty */
	if ((!q->is_input_q &&
	    (q->sbal[q->first_to_check]->element[15].flags & 0xff) == 0x10)) {
		qdio_perf_stat_inc(&perf_stats.outbound_target_full);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%3d",
			      q->first_to_check);
		return;
	}

	DBF_ERROR("%4x BUF ERROR", SCH_NO(q));
	DBF_ERROR((q->is_input_q) ? "IN:%2d" : "OUT:%2d", q->nr);
	DBF_ERROR("FTC:%3d C:%3d", q->first_to_check, count);
	DBF_ERROR("F14:%2x F15:%2x",
		  q->sbal[q->first_to_check]->element[14].flags & 0xff,
		  q->sbal[q->first_to_check]->element[15].flags & 0xff);
}

static inline void inbound_primed(struct qdio_q *q, int count)
{
	int new;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim: %3d", count);

	/* for QEBSM the ACK was already set by EQBS */
	if (is_qebsm(q)) {
		if (!q->u.in.polling) {
			q->u.in.polling = 1;
			q->u.in.ack_count = count;
			q->u.in.ack_start = q->first_to_check;
			return;
		}

		/* delete the previous ACKs */
		set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
			       q->u.in.ack_count);
		q->u.in.ack_count = count;
		q->u.in.ack_start = q->first_to_check;
		return;
	}

	/*
	 * ACK the newest buffer. The ACK will be removed in qdio_stop_polling
	 * or by the next inbound run.
	 */
	new = add_buf(q->first_to_check, count - 1);
	if (q->u.in.polling) {
		/* reset the previous ACK but first set the new one */
		set_buf_state(q, new, SLSB_P_INPUT_ACK);
		set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
	} else {
		q->u.in.polling = 1;
		set_buf_state(q, new, SLSB_P_INPUT_ACK);
	}

	q->u.in.ack_start = new;
	count--;
	if (!count)
		return;

	/*
	 * Need to change all PRIMED buffers to NOT_INIT, otherwise
	 * we're losing initiative in the thinint code.
	 */
	set_buf_states(q, q->first_to_check, SLSB_P_INPUT_NOT_INIT,
		       count);
}
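
/*
 * Scan the inbound queue starting at first_to_check, advancing it over all
 * buffers found in a common state; returns the updated first_to_check.
 */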
static int get_inbound_buffer_frontier(struct qdio_q *q)
{
	int count, stop;
	unsigned char state;

	/*
	 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
	 * would return 0.
	 */
	count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
	stop = add_buf(q->first_to_check, count);

	/*
	 * No siga sync here, as a PCI interrupt or the run after a thin
	 * interrupt will sync the queues.
	 */

	/* need to set count to 1 for non-qebsm */
	if (!is_qebsm(q))
		count = 1;

check_next:
	if (q->first_to_check == stop)
		goto out;

	count = get_buf_states(q, q->first_to_check, &state, count, 1);
	if (!count)
		goto out;

	switch (state) {
	case SLSB_P_INPUT_PRIMED:
		inbound_primed(q, count);
		/*
		 * No siga-sync needed for non-qebsm here, as the inbound queue
		 * will be synced on the next siga-r, resp.
		 * tiqdio_is_inbound_q_done will do the siga-sync.
		 */
		q->first_to_check = add_buf(q->first_to_check, count);
		atomic_sub(count, &q->nr_buf_used);
		goto check_next;
	case SLSB_P_INPUT_ERROR:
		announce_buffer_error(q, count);
		/* process the buffer, the upper layer will take care of it */
		q->first_to_check = add_buf(q->first_to_check, count);
		atomic_sub(count, &q->nr_buf_used);
		break;
	case SLSB_CU_INPUT_EMPTY:
	case SLSB_P_INPUT_NOT_INIT:
	case SLSB_P_INPUT_ACK:
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop");
		break;
	default:
		BUG();
	}
out:
	return q->first_to_check;
}

int qdio_inbound_q_moved(struct qdio_q *q)
{
	int bufnr;

	bufnr = get_inbound_buffer_frontier(q);

	if ((bufnr != q->last_move) || q->qdio_error) {
		q->last_move = bufnr;
		if (!need_siga_sync(q) && !pci_out_supported(q))
			q->u.in.timestamp = get_usecs();

		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in moved");
		return 1;
	} else
		return 0;
}

static int qdio_inbound_q_done(struct qdio_q *q)
{
	unsigned char state = 0;

	if (!atomic_read(&q->nr_buf_used))
		return 1;

	/*
	 * We need that one for synchronization with the adapter, as it
	 * does a kind of PCI avoidance.
	 */
	qdio_siga_sync_q(q);

	get_buf_state(q, q->first_to_check, &state, 0);
	if (state == SLSB_P_INPUT_PRIMED)
		/* we got something to do */
		return 0;

	/* on VM, we don't poll, so the q is always done here */
	if (need_siga_sync(q) || pci_out_supported(q))
		return 1;

	/*
	 * At this point we know, that inbound first_to_check
	 * has (probably) not moved (see qdio_inbound_processing).
	 */
	if (get_usecs() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%3d",
			      q->first_to_check);
		return 1;
	} else {
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in notd:%3d",
			      q->first_to_check);
		return 0;
	}
}
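
/*
 * Call the upper-layer handler for the buffers between first_to_kick and
 * first_to_check and reset the accumulated qdio_error afterwards.
 */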
void qdio_kick_handler(struct qdio_q *q)
{
	int start = q->first_to_kick;
	int end = q->first_to_check;
	int count;

	if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
		return;

	count = sub_buf(end, start);

	if (q->is_input_q) {
		qdio_perf_stat_inc(&perf_stats.inbound_handler);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%3d c:%3d", start, count);
	} else {
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: nr:%1d", q->nr);
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "s:%3d c:%3d", start, count);
	}

	q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count,
		   q->irq_ptr->int_parm);

	/* for the next time */
	q->first_to_kick = end;
	q->qdio_error = 0;
}

static void __qdio_inbound_processing(struct qdio_q *q)
{
	qdio_perf_stat_inc(&perf_stats.tasklet_inbound);
again:
	if (!qdio_inbound_q_moved(q))
		return;

	qdio_kick_handler(q);

	if (!qdio_inbound_q_done(q))
		/* means poll time is not yet over */
		goto again;

	qdio_stop_polling(q);
	/*
	 * We need to check again to not lose initiative after
	 * resetting the ACK state.
	 */
	if (!qdio_inbound_q_done(q))
		goto again;
}

/* inbound tasklet */
void qdio_inbound_processing(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;
	__qdio_inbound_processing(q);
}
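
/*
 * Scan the outbound queue for buffers the adapter has already processed
 * and advance first_to_check accordingly; returns the new first_to_check.
 */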
static int get_outbound_buffer_frontier(struct qdio_q *q)
{
	int count, stop;
	unsigned char state;

	if (((queue_type(q) != QDIO_IQDIO_QFMT) && !pci_out_supported(q)) ||
	    (queue_type(q) == QDIO_IQDIO_QFMT && multicast_outbound(q)))
		qdio_siga_sync_q(q);

	/*
	 * Don't check 128 buffers, as otherwise qdio_outbound_q_moved
	 * would return 0.
	 */
	count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
	stop = add_buf(q->first_to_check, count);

	/* need to set count to 1 for non-qebsm */
	if (!is_qebsm(q))
		count = 1;

check_next:
	if (q->first_to_check == stop)
		return q->first_to_check;

	count = get_buf_states(q, q->first_to_check, &state, count, 0);
	if (!count)
		return q->first_to_check;

	switch (state) {
	case SLSB_P_OUTPUT_EMPTY:
		/* the adapter got it */
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out empty:%1d %3d", q->nr, count);

		atomic_sub(count, &q->nr_buf_used);
		q->first_to_check = add_buf(q->first_to_check, count);
		/*
		 * We fetch all buffer states at once. get_buf_states may
		 * return count < stop. For QEBSM we do not loop.
		 */
		if (is_qebsm(q))
			break;
		goto check_next;
	case SLSB_P_OUTPUT_ERROR:
		announce_buffer_error(q, count);
		/* process the buffer, the upper layer will take care of it */
		q->first_to_check = add_buf(q->first_to_check, count);
		atomic_sub(count, &q->nr_buf_used);
		break;
	case SLSB_CU_OUTPUT_PRIMED:
		/* the adapter has not fetched the output yet */
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d", q->nr);
		break;
	case SLSB_P_OUTPUT_NOT_INIT:
	case SLSB_P_OUTPUT_HALTED:
		break;
	default:
		BUG();
	}
	return q->first_to_check;
}

/* all buffers processed? */
static inline int qdio_outbound_q_done(struct qdio_q *q)
{
	return atomic_read(&q->nr_buf_used) == 0;
}

static inline int qdio_outbound_q_moved(struct qdio_q *q)
{
	int bufnr;

	bufnr = get_outbound_buffer_frontier(q);

	if ((bufnr != q->last_move) || q->qdio_error) {
		q->last_move = bufnr;
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out moved:%1d", q->nr);
		return 1;
	} else
		return 0;
}
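
/*
 * Issue SIGA-w for the output queue if required; condition code 2 with the
 * busy bit set is mapped to QDIO_ERROR_SIGA_BUSY for the caller.
 */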
static int qdio_kick_outbound_q(struct qdio_q *q)
{
	unsigned int busy_bit;
	int cc;

	if (!need_siga_out(q))
		return 0;

	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr);
	qdio_perf_stat_inc(&perf_stats.siga_out);

	cc = qdio_siga_output(q, &busy_bit);
	switch (cc) {
	case 0:
		break;
	case 2:
		if (busy_bit) {
			DBF_ERROR("%4x cc2 REP:%1d", SCH_NO(q), q->nr);
			cc |= QDIO_ERROR_SIGA_BUSY;
		} else
			DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w cc2:%1d", q->nr);
		break;
	case 1:
	case 3:
		DBF_ERROR("%4x SIGA-W:%1d", SCH_NO(q), cc);
		break;
	}
	return cc;
}

static void __qdio_outbound_processing(struct qdio_q *q)
{
	qdio_perf_stat_inc(&perf_stats.tasklet_outbound);
	BUG_ON(atomic_read(&q->nr_buf_used) < 0);

	if (qdio_outbound_q_moved(q))
		qdio_kick_handler(q);

	if (queue_type(q) == QDIO_ZFCP_QFMT)
		if (!pci_out_supported(q) && !qdio_outbound_q_done(q))
			goto sched;

	/* bail out for HiperSockets unicast queues */
	if (queue_type(q) == QDIO_IQDIO_QFMT && !multicast_outbound(q))
		return;

	if ((queue_type(q) == QDIO_IQDIO_QFMT) &&
	    (atomic_read(&q->nr_buf_used)) > QDIO_IQDIO_POLL_LVL)
		goto sched;

	if (q->u.out.pci_out_enabled)
		return;

	/*
	 * Now we know that queue type is either qeth without pci enabled
	 * or HiperSockets multicast. Make sure buffer switch from PRIMED to
	 * EMPTY is noticed and outbound_handler is called after some time.
	 */
	if (qdio_outbound_q_done(q))
		del_timer(&q->u.out.timer);
	else {
		if (!timer_pending(&q->u.out.timer)) {
			mod_timer(&q->u.out.timer, jiffies + 10 * HZ);
			qdio_perf_stat_inc(&perf_stats.debug_tl_out_timer);
		}
	}
	return;

sched:
	if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
		return;
	tasklet_schedule(&q->tasklet);
}

/* outbound tasklet */
void qdio_outbound_processing(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;
	__qdio_outbound_processing(q);
}

void qdio_outbound_timer(unsigned long data)
{
	struct qdio_q *q = (struct qdio_q *)data;

	if (unlikely(q->irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
		return;
	tasklet_schedule(&q->tasklet);
}

/* called from thinint inbound tasklet */
void qdio_check_outbound_after_thinint(struct qdio_q *q)
{
	struct qdio_q *out;
	int i;

	if (!pci_out_supported(q))
		return;

	for_each_output_queue(q->irq_ptr, out, i)
		if (!qdio_outbound_q_done(out))
			tasklet_schedule(&out->tasklet);
}

static inline void qdio_set_state(struct qdio_irq *irq_ptr,
				  enum qdio_irq_states state)
{
	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "newstate: %1d", state);

	irq_ptr->state = state;
	mb();
}

static void qdio_irq_check_sense(struct qdio_irq *irq_ptr, struct irb *irb)
{
	if (irb->esw.esw0.erw.cons) {
		DBF_ERROR("%4x sense:", irq_ptr->schid.sch_no);
		DBF_ERROR_HEX(irb, 64);
		DBF_ERROR_HEX(irb->ecw, 64);
	}
}

/* PCI interrupt handler */
static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
{
	int i;
	struct qdio_q *q;

	if (unlikely(irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
		return;

	qdio_perf_stat_inc(&perf_stats.pci_int);

	for_each_input_queue(irq_ptr, q, i)
		tasklet_schedule(&q->tasklet);

	if (!(irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED))
		return;

	for_each_output_queue(irq_ptr, q, i) {
		if (qdio_outbound_q_done(q))
			continue;
		if (!siga_syncs_out_pci(q))
			qdio_siga_sync_q(q);
		tasklet_schedule(&q->tasklet);
	}
}

static void qdio_handle_activate_check(struct ccw_device *cdev,
				unsigned long intparm, int cstat, int dstat)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct qdio_q *q;

	DBF_ERROR("%4x ACT CHECK", irq_ptr->schid.sch_no);
	DBF_ERROR("intp :%lx", intparm);
	DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);

	if (irq_ptr->nr_input_qs) {
		q = irq_ptr->input_qs[0];
	} else if (irq_ptr->nr_output_qs) {
		q = irq_ptr->output_qs[0];
	} else {
		dump_stack();
		goto no_handler;
	}
	q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
		   0, -1, -1, irq_ptr->int_parm);
no_handler:
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
}

static void qdio_call_shutdown(struct work_struct *work)
{
	struct ccw_device_private *priv;
	struct ccw_device *cdev;

	priv = container_of(work, struct ccw_device_private, kick_work);
	cdev = priv->cdev;
	qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
	put_device(&cdev->dev);
}

static void qdio_int_error(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_INACTIVE:
	case QDIO_IRQ_STATE_CLEANUP:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
		break;
	case QDIO_IRQ_STATE_ESTABLISHED:
	case QDIO_IRQ_STATE_ACTIVE:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
		if (get_device(&cdev->dev)) {
			/* Can't call shutdown from interrupt context. */
			PREPARE_WORK(&cdev->private->kick_work,
				     qdio_call_shutdown);
			queue_work(ccw_device_work, &cdev->private->kick_work);
		}
		break;
	default:
		WARN_ON(1);
	}
	wake_up(&cdev->private->wait_q);
}

static int qdio_establish_check_errors(struct ccw_device *cdev, int cstat,
				       int dstat)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (cstat || (dstat & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END))) {
		DBF_ERROR("EQ:ck con");
		goto error;
	}

	if (!(dstat & DEV_STAT_DEV_END)) {
		DBF_ERROR("EQ:no dev");
		goto error;
	}

	if (dstat & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) {
		DBF_ERROR("EQ: bad io");
		goto error;
	}
	return 0;
error:
	DBF_ERROR("%4x EQ:error", irq_ptr->schid.sch_no);
	DBF_ERROR("ds: %2x cs:%2x", dstat, cstat);

	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
	return 1;
}

static void qdio_establish_handle_irq(struct ccw_device *cdev, int cstat,
				      int dstat)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq");
	if (!qdio_establish_check_errors(cdev, cstat, dstat))
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED);
}

/* qdio interrupt handler */
void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
		      struct irb *irb)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	int cstat, dstat;

	qdio_perf_stat_inc(&perf_stats.qdio_int);

	if (!intparm || !irq_ptr) {
		DBF_ERROR("qint:%4x", cdev->private->schid.sch_no);
		return;
	}

	if (IS_ERR(irb)) {
		switch (PTR_ERR(irb)) {
		case -EIO:
			DBF_ERROR("%4x IO error", irq_ptr->schid.sch_no);
			return;
		case -ETIMEDOUT:
			DBF_ERROR("%4x IO timeout", irq_ptr->schid.sch_no);
			qdio_int_error(cdev);
			return;
		default:
			WARN_ON(1);
			return;
		}
	}
	qdio_irq_check_sense(irq_ptr, irb);

	cstat = irb->scsw.cmd.cstat;
	dstat = irb->scsw.cmd.dstat;

	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_INACTIVE:
		qdio_establish_handle_irq(cdev, cstat, dstat);
		break;
	case QDIO_IRQ_STATE_CLEANUP:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
		break;
	case QDIO_IRQ_STATE_ESTABLISHED:
	case QDIO_IRQ_STATE_ACTIVE:
		if (cstat & SCHN_STAT_PCI) {
			qdio_int_handler_pci(irq_ptr);
			/* no state change so no need to wake up wait_q */
			return;
		}
		if ((cstat & ~SCHN_STAT_PCI) || dstat) {
			qdio_handle_activate_check(cdev, intparm, cstat,
						   dstat);
			break;
		}
	default:
		WARN_ON(1);
	}
	wake_up(&cdev->private->wait_q);
}

/**
 * qdio_get_ssqd_desc - get qdio subchannel description
 * @cdev: ccw device to get description for
 * @data: where to store the ssqd
 *
 * Returns 0 or an error code. The results of the chsc are stored in the
 * specified structure.
 */
int qdio_get_ssqd_desc(struct ccw_device *cdev,
		       struct qdio_ssqd_desc *data)
{
	if (!cdev || !cdev->private)
		return -EINVAL;

	DBF_EVENT("get ssqd:%4x", cdev->private->schid.sch_no);
	return qdio_setup_get_ssqd(NULL, &cdev->private->schid, data);
}
EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc);

/**
 * qdio_cleanup - shutdown queues and free data structures
 * @cdev: associated ccw device
 * @how: use halt or clear to shutdown
 *
 * This function calls qdio_shutdown() for @cdev with method @how
 * and qdio_free(). The qdio_free() return value is ignored since
 * !irq_ptr is already checked.
 */
int qdio_cleanup(struct ccw_device *cdev, int how)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	int rc;

	if (!irq_ptr)
		return -ENODEV;

	rc = qdio_shutdown(cdev, how);
	qdio_free(cdev);
	return rc;
}
EXPORT_SYMBOL_GPL(qdio_cleanup);

static void qdio_shutdown_queues(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	struct qdio_q *q;
	int i;

	for_each_input_queue(irq_ptr, q, i)
		tasklet_kill(&q->tasklet);

	for_each_output_queue(irq_ptr, q, i) {
		del_timer(&q->u.out.timer);
		tasklet_kill(&q->tasklet);
	}
}

/**
 * qdio_shutdown - shut down a qdio subchannel
 * @cdev: associated ccw device
 * @how: use halt or clear to shutdown
 */
int qdio_shutdown(struct ccw_device *cdev, int how)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
	int rc;
	unsigned long flags;

	if (!irq_ptr)
		return -ENODEV;

	BUG_ON(irqs_disabled());
	DBF_EVENT("qshutdown:%4x", cdev->private->schid.sch_no);

	mutex_lock(&irq_ptr->setup_mutex);
	/*
	 * Subchannel was already shot down. We cannot prevent being called
	 * twice since cio may trigger a shutdown asynchronously.
	 */
	if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
		mutex_unlock(&irq_ptr->setup_mutex);
		return 0;
	}

	/*
	 * Indicate that the device is going down. Scheduling the queue
	 * tasklets is forbidden from here on.
	 */
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);

	tiqdio_remove_input_queues(irq_ptr);
	qdio_shutdown_queues(cdev);
	qdio_shutdown_debug_entries(irq_ptr, cdev);

	/* cleanup subchannel */
	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);

	if (how & QDIO_FLAG_CLEANUP_USING_CLEAR)
		rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
	else
		/* default behaviour is halt */
		rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
	if (rc) {
		DBF_ERROR("%4x SHUTD ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4d", rc);
		goto no_cleanup;
	}

	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	wait_event_interruptible_timeout(cdev->private->wait_q,
		irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
		irq_ptr->state == QDIO_IRQ_STATE_ERR,
		10 * HZ);
	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);

no_cleanup:
	qdio_shutdown_thinint(irq_ptr);

	/* restore interrupt handler */
	if ((void *)cdev->handler == (void *)qdio_int_handler)
		cdev->handler = irq_ptr->orig_handler;
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);

	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
	mutex_unlock(&irq_ptr->setup_mutex);
	if (rc)
		return rc;
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_shutdown);
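
/*
 * Note (illustrative): qdio_shutdown() chooses between ccw_device_clear()
 * and ccw_device_halt() based on the QDIO_FLAG_CLEANUP_USING_CLEAR bit in
 * @how, with halt as the default.  A hypothetical recovery path that wants
 * the subchannel cleared unconditionally, as qdio_establish() below does
 * on failure, would pass that flag and then release the data structures:
 *
 *	qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
 *	qdio_free(cdev);
 */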

/**
 * qdio_free - free data structures for a qdio subchannel
 * @cdev: associated ccw device
 */
int qdio_free(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr = cdev->private->qdio_data;

	if (!irq_ptr)
		return -ENODEV;

	DBF_EVENT("qfree:%4x", cdev->private->schid.sch_no);
	mutex_lock(&irq_ptr->setup_mutex);

	if (irq_ptr->debug_area != NULL) {
		debug_unregister(irq_ptr->debug_area);
		irq_ptr->debug_area = NULL;
	}
	cdev->private->qdio_data = NULL;
	mutex_unlock(&irq_ptr->setup_mutex);

	qdio_release_memory(irq_ptr);
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_free);

/**
 * qdio_initialize - allocate and establish queues for a qdio subchannel
 * @init_data: initialization data
 *
 * This function first allocates queues via qdio_allocate() and on success
 * establishes them via qdio_establish().
 */
int qdio_initialize(struct qdio_initialize *init_data)
{
	int rc;

	rc = qdio_allocate(init_data);
	if (rc)
		return rc;

	rc = qdio_establish(init_data);
	if (rc)
		qdio_free(init_data->cdev);
	return rc;
}
EXPORT_SYMBOL_GPL(qdio_initialize);

/**
 * qdio_allocate - allocate qdio queues and associated data
 * @init_data: initialization data
 */
int qdio_allocate(struct qdio_initialize *init_data)
{
	struct qdio_irq *irq_ptr;

	DBF_EVENT("qallocate:%4x", init_data->cdev->private->schid.sch_no);

	if ((init_data->no_input_qs && !init_data->input_handler) ||
	    (init_data->no_output_qs && !init_data->output_handler))
		return -EINVAL;

	if ((init_data->no_input_qs > QDIO_MAX_QUEUES_PER_IRQ) ||
	    (init_data->no_output_qs > QDIO_MAX_QUEUES_PER_IRQ))
		return -EINVAL;

	if ((!init_data->input_sbal_addr_array) ||
	    (!init_data->output_sbal_addr_array))
		return -EINVAL;

	/* irq_ptr must be in GFP_DMA since it contains ccw1.cda */
	irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!irq_ptr)
		goto out_err;

	mutex_init(&irq_ptr->setup_mutex);
	qdio_allocate_dbf(init_data, irq_ptr);

	/*
	 * Allocate a page for the chsc calls in qdio_establish.
	 * Must be pre-allocated since a zfcp recovery will call
	 * qdio_establish. In case of low memory and swap on a zfcp disk
	 * we may not be able to allocate memory otherwise.
	 */
	irq_ptr->chsc_page = get_zeroed_page(GFP_KERNEL);
	if (!irq_ptr->chsc_page)
		goto out_rel;

	/* qdr is used in ccw1.cda which is u32 */
	irq_ptr->qdr = (struct qdr *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!irq_ptr->qdr)
		goto out_rel;
	WARN_ON((unsigned long)irq_ptr->qdr & 0xfff);

	if (qdio_allocate_qs(irq_ptr, init_data->no_input_qs,
			     init_data->no_output_qs))
		goto out_rel;

	init_data->cdev->private->qdio_data = irq_ptr;
	qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
	return 0;
out_rel:
	qdio_release_memory(irq_ptr);
out_err:
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(qdio_allocate);
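
/*
 * Illustration only (not compiled here): a minimal sketch of the
 * qdio_initialize members that qdio_allocate() above validates.  The
 * handler functions and SBAL address arrays are hypothetical placeholders
 * owned by the upper-layer driver; the remaining members of struct
 * qdio_initialize are omitted.
 *
 *	struct qdio_initialize init_data = {
 *		.cdev			= cdev,
 *		.no_input_qs		= 1,
 *		.no_output_qs		= 1,
 *		.input_handler		= my_input_handler,
 *		.output_handler		= my_output_handler,
 *		.input_sbal_addr_array	= my_in_sbals,
 *		.output_sbal_addr_array	= my_out_sbals,
 *	};
 *
 *	rc = qdio_allocate(&init_data);
 */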

/**
 * qdio_establish - establish queues on a qdio subchannel
 * @init_data: initialization data
 */
int qdio_establish(struct qdio_initialize *init_data)
{
	struct qdio_irq *irq_ptr;
	struct ccw_device *cdev = init_data->cdev;
	unsigned long saveflags;
	int rc;

	DBF_EVENT("qestablish:%4x", cdev->private->schid.sch_no);

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	if (cdev->private->state != DEV_STATE_ONLINE)
		return -EINVAL;

	mutex_lock(&irq_ptr->setup_mutex);
	qdio_setup_irq(init_data);

	rc = qdio_establish_thinint(irq_ptr);
	if (rc) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return rc;
	}

	/* establish q */
	irq_ptr->ccw.cmd_code = irq_ptr->equeue.cmd;
	irq_ptr->ccw.flags = CCW_FLAG_SLI;
	irq_ptr->ccw.count = irq_ptr->equeue.count;
	irq_ptr->ccw.cda = (u32)((addr_t)irq_ptr->qdr);

	spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
	ccw_device_set_options_mask(cdev, 0);

	rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0);
	if (rc) {
		DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4x", rc);
	}
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);

	if (rc) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return rc;
	}

	wait_event_interruptible_timeout(cdev->private->wait_q,
		irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
		irq_ptr->state == QDIO_IRQ_STATE_ERR, HZ);

	if (irq_ptr->state != QDIO_IRQ_STATE_ESTABLISHED) {
		mutex_unlock(&irq_ptr->setup_mutex);
		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
		return -EIO;
	}

	qdio_setup_ssqd_info(irq_ptr);
	DBF_EVENT("qDmmwc:%2x", irq_ptr->ssqd_desc.mmwc);
	DBF_EVENT("qib ac:%4x", irq_ptr->qib.ac);

	/* qebsm is now setup if available, initialize buffer states */
	qdio_init_buf_states(irq_ptr);

	mutex_unlock(&irq_ptr->setup_mutex);
	qdio_print_subchannel_info(irq_ptr, cdev);
	qdio_setup_debug_entries(irq_ptr, cdev);
	return 0;
}
EXPORT_SYMBOL_GPL(qdio_establish);

/**
 * qdio_activate - activate queues on a qdio subchannel
 * @cdev: associated cdev
 */
int qdio_activate(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr;
	int rc;
	unsigned long saveflags;

	DBF_EVENT("qactivate:%4x", cdev->private->schid.sch_no);

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	if (cdev->private->state != DEV_STATE_ONLINE)
		return -EINVAL;

	mutex_lock(&irq_ptr->setup_mutex);
	if (irq_ptr->state == QDIO_IRQ_STATE_INACTIVE) {
		rc = -EBUSY;
		goto out;
	}

	irq_ptr->ccw.cmd_code = irq_ptr->aqueue.cmd;
	irq_ptr->ccw.flags = CCW_FLAG_SLI;
	irq_ptr->ccw.count = irq_ptr->aqueue.count;
	irq_ptr->ccw.cda = 0;

	spin_lock_irqsave(get_ccwdev_lock(cdev), saveflags);
	ccw_device_set_options(cdev, CCWDEV_REPORT_ALL);

	rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE,
			      0, DOIO_DENY_PREFETCH);
	if (rc) {
		DBF_ERROR("%4x act IO ERR", irq_ptr->schid.sch_no);
		DBF_ERROR("rc:%4x", rc);
	}
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags);

	if (rc)
		goto out;

	if (is_thinint_irq(irq_ptr))
		tiqdio_add_input_queues(irq_ptr);

	/* wait for subchannel to become active */
	msleep(5);

	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_STOPPED:
	case QDIO_IRQ_STATE_ERR:
		rc = -EIO;
		break;
	default:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE);
		rc = 0;
	}
out:
	mutex_unlock(&irq_ptr->setup_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(qdio_activate);
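
/*
 * Bring-up order (sketch, reusing the hypothetical init_data above): a
 * caller is expected to allocate, establish and activate before handing
 * buffers to do_QDIO(), which returns -EBUSY while the subchannel is not
 * yet active.  qdio_initialize() collapses the first two steps:
 *
 *	rc = qdio_initialize(&init_data);
 *	if (rc)
 *		return rc;
 *	rc = qdio_activate(cdev);
 *	if (rc) {
 *		qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
 *		qdio_free(cdev);
 *	}
 */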

static inline int buf_in_between(int bufnr, int start, int count)
{
	int end = add_buf(start, count);

	if (end > start) {
		if (bufnr >= start && bufnr < end)
			return 1;
		else
			return 0;
	}

	/* wrap-around case */
	if ((bufnr >= start && bufnr <= QDIO_MAX_BUFFERS_PER_Q) ||
	    (bufnr < end))
		return 1;
	else
		return 0;
}
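
/*
 * Worked example (assuming the usual ring size, QDIO_MAX_BUFFERS_PER_Q
 * == 128, and modulo wrapping in add_buf()): for start = 120 and
 * count = 16, end = add_buf(120, 16) = 8, so the wrap-around branch is
 * taken.  bufnr 125 (>= start) and bufnr 3 (< end) are reported as
 * inside the range, while bufnr 50 is not.
 */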

/**
 * handle_inbound - reset processed input buffers
 * @q: queue containing the buffers
 * @callflags: flags
 * @bufnr: first buffer to process
 * @count: how many buffers are emptied
 */
static int handle_inbound(struct qdio_q *q, unsigned int callflags,
			  int bufnr, int count)
{
	int used, diff;

	if (!q->u.in.polling)
		goto set;

	/* protect against stop polling setting an ACK for an emptied slsb */
	if (count == QDIO_MAX_BUFFERS_PER_Q) {
		/* overwriting everything, just delete polling status */
		q->u.in.polling = 0;
		q->u.in.ack_count = 0;
		goto set;
	} else if (buf_in_between(q->u.in.ack_start, bufnr, count)) {
		if (is_qebsm(q)) {
			/* partial overwrite, just update ack_start */
			diff = add_buf(bufnr, count);
			diff = sub_buf(diff, q->u.in.ack_start);
			q->u.in.ack_count -= diff;
			if (q->u.in.ack_count <= 0) {
				q->u.in.polling = 0;
				q->u.in.ack_count = 0;
				goto set;
			}
			q->u.in.ack_start = add_buf(q->u.in.ack_start, diff);
		} else
			/* the only ACK will be deleted, so stop polling */
			q->u.in.polling = 0;
	}

set:
	count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count);

	used = atomic_add_return(count, &q->nr_buf_used) - count;
	BUG_ON(used + count > QDIO_MAX_BUFFERS_PER_Q);

	/* no need to signal as long as the adapter had free buffers */
	if (used)
		return 0;

	if (need_siga_in(q))
		return qdio_siga_input(q);
	return 0;
}
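
/*
 * Worked example for the QEBSM partial-overwrite path above (128-buffer
 * ring assumed): with ack_start = 10, ack_count = 4 and the program
 * returning bufnr = 8, count = 3, buf_in_between() matches, so
 * diff = sub_buf(add_buf(8, 3), 10) = 1.  The acknowledged area shrinks
 * to ack_count = 3 and moves to ack_start = 11, just past the buffers
 * that were re-emptied.
 */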

/**
 * handle_outbound - process filled outbound buffers
 * @q: queue containing the buffers
 * @callflags: flags
 * @bufnr: first buffer to process
 * @count: how many buffers are filled
 */
static int handle_outbound(struct qdio_q *q, unsigned int callflags,
			   int bufnr, int count)
{
	unsigned char state;
	int used, rc = 0;

	qdio_perf_stat_inc(&perf_stats.outbound_handler);

	count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count);
	used = atomic_add_return(count, &q->nr_buf_used);
	BUG_ON(used > QDIO_MAX_BUFFERS_PER_Q);

	if (callflags & QDIO_FLAG_PCI_OUT)
		q->u.out.pci_out_enabled = 1;
	else
		q->u.out.pci_out_enabled = 0;

	if (queue_type(q) == QDIO_IQDIO_QFMT) {
		if (multicast_outbound(q))
			rc = qdio_kick_outbound_q(q);
		else
			if ((q->irq_ptr->ssqd_desc.mmwc > 1) &&
			    (count > 1) &&
			    (count <= q->irq_ptr->ssqd_desc.mmwc)) {
				/* exploit enhanced SIGA */
				q->u.out.use_enh_siga = 1;
				rc = qdio_kick_outbound_q(q);
			} else {
				/*
				 * One siga-w per buffer required for unicast
				 * HiperSockets.
				 */
				q->u.out.use_enh_siga = 0;
				while (count--) {
					rc = qdio_kick_outbound_q(q);
					if (rc)
						goto out;
				}
			}
		goto out;
	}

	if (need_siga_sync(q)) {
		qdio_siga_sync_q(q);
		goto out;
	}

	/* try to fast requeue buffers */
	get_buf_state(q, prev_buf(bufnr), &state, 0);
	if (state != SLSB_CU_OUTPUT_PRIMED)
		rc = qdio_kick_outbound_q(q);
	else {
		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "fast-req");
		qdio_perf_stat_inc(&perf_stats.fast_requeue);
	}
out:
	tasklet_schedule(&q->tasklet);
	return rc;
}

/**
 * do_QDIO - process input or output buffers
 * @cdev: associated ccw_device for the qdio subchannel
 * @callflags: input or output and special flags from the program
 * @q_nr: queue number
 * @bufnr: buffer number
 * @count: how many buffers to process
 */
int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
	    int q_nr, int bufnr, int count)
{
	struct qdio_irq *irq_ptr;

	if ((bufnr > QDIO_MAX_BUFFERS_PER_Q) ||
	    (count > QDIO_MAX_BUFFERS_PER_Q) ||
	    (q_nr > QDIO_MAX_QUEUES_PER_IRQ))
		return -EINVAL;

	if (!count)
		return 0;

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

	if (callflags & QDIO_FLAG_SYNC_INPUT)
		DBF_DEV_EVENT(DBF_INFO, irq_ptr, "doQDIO input");
	else
		DBF_DEV_EVENT(DBF_INFO, irq_ptr, "doQDIO output");
	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "q:%1d flag:%4x", q_nr, callflags);
	DBF_DEV_EVENT(DBF_INFO, irq_ptr, "buf:%2d cnt:%3d", bufnr, count);

	if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)
		return -EBUSY;

	if (callflags & QDIO_FLAG_SYNC_INPUT)
		return handle_inbound(irq_ptr->input_qs[q_nr],
				      callflags, bufnr, count);
	else if (callflags & QDIO_FLAG_SYNC_OUTPUT)
		return handle_outbound(irq_ptr->output_qs[q_nr],
				       callflags, bufnr, count);
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(do_QDIO);
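
/*
 * Usage sketch (hypothetical caller): flags, queue number and signature
 * match do_QDIO() above; the bufnr/count bookkeeping belongs to the
 * upper-layer driver.  After emptying count SBALs starting at bufnr on
 * input queue 0:
 *
 *	rc = do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, bufnr, count);
 *
 * and after filling a single outbound SBAL at bufnr on output queue 0:
 *
 *	rc = do_QDIO(cdev, QDIO_FLAG_SYNC_OUTPUT, 0, bufnr, 1);
 */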

static int __init init_QDIO(void)
{
	int rc;

	rc = qdio_setup_init();
	if (rc)
		return rc;
	rc = tiqdio_allocate_memory();
	if (rc)
		goto out_cache;
	rc = qdio_debug_init();
	if (rc)
		goto out_ti;
	rc = qdio_setup_perf_stats();
	if (rc)
		goto out_debug;
	rc = tiqdio_register_thinints();
	if (rc)
		goto out_perf;
	return 0;

out_perf:
	qdio_remove_perf_stats();
out_debug:
	qdio_debug_exit();
out_ti:
	tiqdio_free_memory();
out_cache:
	qdio_setup_exit();
	return rc;
}

static void __exit exit_QDIO(void)
{
	tiqdio_unregister_thinints();
	tiqdio_free_memory();
	qdio_remove_perf_stats();
	qdio_debug_exit();
	qdio_setup_exit();
}

module_init(init_QDIO);
module_exit(exit_QDIO);