s390/qdio: remove Input tasklet code
Both qeth and zfcp have fully moved to the polling-driven flow for Input
Queues with commit 0a6e634535f1 ("s390/qdio: extend polling support to
multiple queues") and commit 0b524abc2dd1 ("scsi: zfcp: Lift Input Queue
tasklet from qdio"). So remove the tasklet code for Input Queues,
streamline the IRQ handlers, and push the tasklet struct into
struct qdio_output_q.

Signed-off-by: Julian Wiedmann <jwi@linux.ibm.com>
Reviewed-by: Benjamin Block <bblock@linux.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
This commit is contained in:
Parent commit: 1daafea411
This commit: 1ecbcfd57e
@ -336,7 +336,7 @@ typedef void qdio_handler_t(struct ccw_device *, unsigned int, int,
|
||||
* @no_output_qs: number of output queues
|
||||
* @input_handler: handler to be called for input queues
|
||||
* @output_handler: handler to be called for output queues
|
||||
* @irq_poll: Data IRQ polling handler (NULL when not supported)
|
||||
* @irq_poll: Data IRQ polling handler
|
||||
* @scan_threshold: # of in-use buffers that triggers scan on output queue
|
||||
* @int_parm: interruption parameter
|
||||
* @input_sbal_addr_array: per-queue array, each element points to 128 SBALs
|
||||
|
@ -139,9 +139,6 @@ struct qdio_dev_perf_stat {
|
||||
unsigned int qdio_int;
|
||||
unsigned int pci_request_int;
|
||||
|
||||
unsigned int tasklet_inbound;
|
||||
unsigned int tasklet_inbound_resched;
|
||||
unsigned int tasklet_inbound_resched2;
|
||||
unsigned int tasklet_outbound;
|
||||
|
||||
unsigned int siga_read;
|
||||
@ -193,6 +190,8 @@ struct qdio_output_q {
|
||||
struct qdio_outbuf_state *sbal_state;
|
||||
/* timer to check for more outbound work */
|
||||
struct timer_list timer;
|
||||
/* tasklet to check for completions */
|
||||
struct tasklet_struct tasklet;
|
||||
};
|
||||
|
||||
/*
|
||||
@ -222,7 +221,6 @@ struct qdio_q {
|
||||
/* last scan of the queue */
|
||||
u64 timestamp;
|
||||
|
||||
struct tasklet_struct tasklet;
|
||||
struct qdio_queue_perf_stat q_stats;
|
||||
|
||||
struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q] ____cacheline_aligned;
|
||||
@ -324,6 +322,14 @@ static inline int multicast_outbound(struct qdio_q *q)
|
||||
(q->nr == q->irq_ptr->nr_output_qs - 1);
|
||||
}
|
||||
|
||||
static inline void qdio_deliver_irq(struct qdio_irq *irq)
|
||||
{
|
||||
if (!test_and_set_bit(QDIO_IRQ_DISABLED, &irq->poll_state))
|
||||
irq->irq_poll(irq->cdev, irq->int_parm);
|
||||
else
|
||||
QDIO_PERF_STAT_INC(irq, int_discarded);
|
||||
}
|
||||
|
||||
#define pci_out_supported(irq) ((irq)->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED)
|
||||
#define is_qebsm(q) (q->irq_ptr->sch_token != 0)
|
||||
|
||||
@ -359,13 +365,11 @@ int qdio_establish_thinint(struct qdio_irq *irq_ptr);
|
||||
void qdio_shutdown_thinint(struct qdio_irq *irq_ptr);
|
||||
void tiqdio_add_device(struct qdio_irq *irq_ptr);
|
||||
void tiqdio_remove_device(struct qdio_irq *irq_ptr);
|
||||
void tiqdio_inbound_processing(unsigned long q);
|
||||
int qdio_thinint_init(void);
|
||||
void qdio_thinint_exit(void);
|
||||
int test_nonshared_ind(struct qdio_irq *);
|
||||
|
||||
/* prototypes for setup */
|
||||
void qdio_inbound_processing(unsigned long data);
|
||||
void qdio_outbound_processing(unsigned long data);
|
||||
void qdio_outbound_timer(struct timer_list *t);
|
||||
void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
|
||||
|
@ -197,9 +197,6 @@ static char *qperf_names[] = {
|
||||
"Assumed adapter interrupts",
|
||||
"QDIO interrupts",
|
||||
"Requested PCIs",
|
||||
"Inbound tasklet runs",
|
||||
"Inbound tasklet resched",
|
||||
"Inbound tasklet resched2",
|
||||
"Outbound tasklet runs",
|
||||
"SIGA read",
|
||||
"SIGA write",
|
||||
|
@ -575,51 +575,12 @@ static void qdio_kick_handler(struct qdio_q *q, unsigned int start,
|
||||
static inline int qdio_tasklet_schedule(struct qdio_q *q)
|
||||
{
|
||||
if (likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE)) {
|
||||
tasklet_schedule(&q->tasklet);
|
||||
tasklet_schedule(&q->u.out.tasklet);
|
||||
return 0;
|
||||
}
|
||||
return -EPERM;
|
||||
}
|
||||
|
||||
static void __qdio_inbound_processing(struct qdio_q *q)
|
||||
{
|
||||
unsigned int start = q->first_to_check;
|
||||
int count;
|
||||
|
||||
qperf_inc(q, tasklet_inbound);
|
||||
|
||||
count = qdio_inbound_q_moved(q, start);
|
||||
if (count == 0)
|
||||
return;
|
||||
|
||||
qdio_kick_handler(q, start, count);
|
||||
start = add_buf(start, count);
|
||||
q->first_to_check = start;
|
||||
|
||||
if (!qdio_inbound_q_done(q, start)) {
|
||||
/* means poll time is not yet over */
|
||||
qperf_inc(q, tasklet_inbound_resched);
|
||||
if (!qdio_tasklet_schedule(q))
|
||||
return;
|
||||
}
|
||||
|
||||
qdio_stop_polling(q);
|
||||
/*
|
||||
* We need to check again to not lose initiative after
|
||||
* resetting the ACK state.
|
||||
*/
|
||||
if (!qdio_inbound_q_done(q, start)) {
|
||||
qperf_inc(q, tasklet_inbound_resched2);
|
||||
qdio_tasklet_schedule(q);
|
||||
}
|
||||
}
|
||||
|
||||
void qdio_inbound_processing(unsigned long data)
|
||||
{
|
||||
struct qdio_q *q = (struct qdio_q *)data;
|
||||
__qdio_inbound_processing(q);
|
||||
}
|
||||
|
||||
static void qdio_check_pending(struct qdio_q *q, unsigned int index)
|
||||
{
|
||||
unsigned char state;
|
||||
@ -825,19 +786,6 @@ static inline void qdio_check_outbound_pci_queues(struct qdio_irq *irq)
|
||||
qdio_tasklet_schedule(out);
|
||||
}
|
||||
|
||||
void tiqdio_inbound_processing(unsigned long data)
|
||||
{
|
||||
struct qdio_q *q = (struct qdio_q *)data;
|
||||
|
||||
if (need_siga_sync(q) && need_siga_sync_after_ai(q))
|
||||
qdio_sync_queues(q);
|
||||
|
||||
/* The interrupt could be caused by a PCI request: */
|
||||
qdio_check_outbound_pci_queues(q->irq_ptr);
|
||||
|
||||
__qdio_inbound_processing(q);
|
||||
}
|
||||
|
||||
static inline void qdio_set_state(struct qdio_irq *irq_ptr,
|
||||
enum qdio_irq_states state)
|
||||
{
|
||||
@ -865,15 +813,7 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
|
||||
if (unlikely(irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
|
||||
return;
|
||||
|
||||
if (irq_ptr->irq_poll) {
|
||||
if (!test_and_set_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state))
|
||||
irq_ptr->irq_poll(irq_ptr->cdev, irq_ptr->int_parm);
|
||||
else
|
||||
QDIO_PERF_STAT_INC(irq_ptr, int_discarded);
|
||||
} else {
|
||||
for_each_input_queue(irq_ptr, q, i)
|
||||
tasklet_schedule(&q->tasklet);
|
||||
}
|
||||
qdio_deliver_irq(irq_ptr);
|
||||
|
||||
if (!pci_out_supported(irq_ptr) || !irq_ptr->scan_threshold)
|
||||
return;
|
||||
@ -1016,12 +956,9 @@ static void qdio_shutdown_queues(struct qdio_irq *irq_ptr)
|
||||
struct qdio_q *q;
|
||||
int i;
|
||||
|
||||
for_each_input_queue(irq_ptr, q, i)
|
||||
tasklet_kill(&q->tasklet);
|
||||
|
||||
for_each_output_queue(irq_ptr, q, i) {
|
||||
del_timer_sync(&q->u.out.timer);
|
||||
tasklet_kill(&q->tasklet);
|
||||
tasklet_kill(&q->u.out.tasklet);
|
||||
}
|
||||
}
|
||||
|
||||
@ -1263,6 +1200,9 @@ int qdio_establish(struct ccw_device *cdev,
|
||||
!init_data->output_sbal_addr_array)
|
||||
return -EINVAL;
|
||||
|
||||
if (!init_data->irq_poll)
|
||||
return -EINVAL;
|
||||
|
||||
mutex_lock(&irq_ptr->setup_mutex);
|
||||
qdio_trace_init_data(irq_ptr, init_data);
|
||||
qdio_setup_irq(irq_ptr, init_data);
|
||||
|
@ -259,14 +259,6 @@ static void setup_queues(struct qdio_irq *irq_ptr,
|
||||
|
||||
setup_storage_lists(q, irq_ptr,
|
||||
qdio_init->input_sbal_addr_array[i], i);
|
||||
|
||||
if (is_thinint_irq(irq_ptr)) {
|
||||
tasklet_init(&q->tasklet, tiqdio_inbound_processing,
|
||||
(unsigned long) q);
|
||||
} else {
|
||||
tasklet_init(&q->tasklet, qdio_inbound_processing,
|
||||
(unsigned long) q);
|
||||
}
|
||||
}
|
||||
|
||||
for_each_output_queue(irq_ptr, q, i) {
|
||||
@ -280,7 +272,7 @@ static void setup_queues(struct qdio_irq *irq_ptr,
|
||||
setup_storage_lists(q, irq_ptr,
|
||||
qdio_init->output_sbal_addr_array[i], i);
|
||||
|
||||
tasklet_init(&q->tasklet, qdio_outbound_processing,
|
||||
tasklet_init(&q->u.out.tasklet, qdio_outbound_processing,
|
||||
(unsigned long) q);
|
||||
timer_setup(&q->u.out.timer, qdio_outbound_timer, 0);
|
||||
}
|
||||
@ -483,12 +475,8 @@ int qdio_setup_irq(struct qdio_irq *irq_ptr, struct qdio_initialize *init_data)
|
||||
ccw_device_get_schid(cdev, &irq_ptr->schid);
|
||||
setup_queues(irq_ptr, init_data);
|
||||
|
||||
if (init_data->irq_poll) {
|
||||
irq_ptr->irq_poll = init_data->irq_poll;
|
||||
set_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state);
|
||||
} else {
|
||||
irq_ptr->irq_poll = NULL;
|
||||
}
|
||||
irq_ptr->irq_poll = init_data->irq_poll;
|
||||
set_bit(QDIO_IRQ_DISABLED, &irq_ptr->poll_state);
|
||||
|
||||
setup_qib(irq_ptr, init_data);
|
||||
set_impl_params(irq_ptr, init_data->qib_param_field_format,
|
||||
|
@ -106,32 +106,6 @@ static inline u32 clear_shared_ind(void)
|
||||
return xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 0);
|
||||
}
|
||||
|
||||
static inline void tiqdio_call_inq_handlers(struct qdio_irq *irq)
|
||||
{
|
||||
struct qdio_q *q;
|
||||
int i;
|
||||
|
||||
if (!references_shared_dsci(irq))
|
||||
xchg(irq->dsci, 0);
|
||||
|
||||
if (irq->irq_poll) {
|
||||
if (!test_and_set_bit(QDIO_IRQ_DISABLED, &irq->poll_state))
|
||||
irq->irq_poll(irq->cdev, irq->int_parm);
|
||||
else
|
||||
QDIO_PERF_STAT_INC(irq, int_discarded);
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
for_each_input_queue(irq, q, i) {
|
||||
/*
|
||||
* Call inbound processing but not directly
|
||||
* since that could starve other thinint queues.
|
||||
*/
|
||||
tasklet_schedule(&q->tasklet);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* tiqdio_thinint_handler - thin interrupt handler for qdio
|
||||
* @airq: pointer to adapter interrupt descriptor
|
||||
@ -153,10 +127,14 @@ static void tiqdio_thinint_handler(struct airq_struct *airq, bool floating)
|
||||
if (unlikely(references_shared_dsci(irq))) {
|
||||
if (!si_used)
|
||||
continue;
|
||||
} else if (!*irq->dsci)
|
||||
continue;
|
||||
} else {
|
||||
if (!*irq->dsci)
|
||||
continue;
|
||||
|
||||
tiqdio_call_inq_handlers(irq);
|
||||
xchg(irq->dsci, 0);
|
||||
}
|
||||
|
||||
qdio_deliver_irq(irq);
|
||||
|
||||
QDIO_PERF_STAT_INC(irq, adapter_int);
|
||||
}
|
||||
|
Loading…
x
Reference in New Issue
Block a user