author    | Jan Glauber <jang@linux.vnet.ibm.com> | 2010-09-07 21:14:39 +0000
committer | David S. Miller <davem@davemloft.net> | 2010-09-08 14:31:00 -0700
commit    | d36deae75011a7890f0e730dd0f867c64081cb50 (patch)
tree      | d24672cf5cc40c637186362187450362cabefd24 /drivers/s390/cio
parent    | e508be174ad36b0cf9b324cd04978c2b13c21502 (diff)
qdio: extend API to allow polling
Extend the qdio API to allow polling in the upper-layer driver. This
is needed by qeth to use NAPI.
To use the new interface the upper-layer driver must provide the
queue_start_poll() callback. This callback signals the upper-layer
driver that it has the initiative and must process the inbound queue by
calling qdio_get_next_buffers(). When the upper-layer driver wants to
stop polling it calls qdio_start_irq(), which re-enables interrupt
processing for the queue.
Since adapter interrupts are not completely stoppable, qdio implements
a software bit, QDIO_QUEUE_IRQS_DISABLED, to safely disable interrupt
processing for an input queue.
The old interface is preserved and will be used as is by zfcp.
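
As an illustration (not part of the patch), here is a minimal sketch of how
an upper-layer driver such as qeth might wire the new interface into a NAPI
poll loop. The qdio_get_next_buffers(), qdio_start_irq() and
queue_start_poll() calls are the ones added by this patch; struct my_card,
my_process_buffer() and the use of int_parm as a card pointer are
hypothetical driver-side placeholders.

/*
 * Sketch only; assumes <linux/netdevice.h> and asm/qdio.h.
 * my_card, my_process_buffer() and my_poll budget handling are hypothetical.
 */
static void my_queue_start_poll(struct ccw_device *cdev, int queue,
                                unsigned long int_parm)
{
        struct my_card *card = (struct my_card *)int_parm; /* hypothetical */

        napi_schedule(&card->napi);     /* hand the initiative to the poll loop */
}

static int my_napi_poll(struct napi_struct *napi, int budget)
{
        struct my_card *card = container_of(napi, struct my_card, napi);
        int work_done = 0, bufnr, error, n;

        while (work_done < budget) {
                /* fetch the next batch of filled inbound buffers */
                n = qdio_get_next_buffers(card->cdev, 0, &bufnr, &error);
                if (n <= 0)
                        break;
                work_done += my_process_buffer(card, bufnr, n, error);
        }

        if (work_done < budget) {
                napi_complete(napi);
                /* re-enable interrupts; rc 1 means new data arrived meanwhile */
                if (qdio_start_irq(card->cdev, 0))
                        napi_schedule(napi);
        }
        return work_done;
}

The callback itself is registered through the queue_start_poll member of
struct qdio_initialize, as the qdio_setup.c hunk below shows.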
Signed-off-by: Jan Glauber <jang@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Frank Blaschka <frank.blaschka@de.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/s390/cio')
-rw-r--r-- | drivers/s390/cio/qdio.h         |  29
-rw-r--r-- | drivers/s390/cio/qdio_debug.c   |  33
-rw-r--r-- | drivers/s390/cio/qdio_main.c    | 138
-rw-r--r-- | drivers/s390/cio/qdio_setup.c   |   1
-rw-r--r-- | drivers/s390/cio/qdio_thinint.c |  66
5 files changed, 216 insertions(+), 51 deletions(-)
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index f0037eefd44e..0f4ef8769a3d 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -208,6 +208,7 @@ struct qdio_dev_perf_stat {
         unsigned int eqbs_partial;
         unsigned int sqbs;
         unsigned int sqbs_partial;
+        unsigned int int_discarded;
 } ____cacheline_aligned;
 
 struct qdio_queue_perf_stat {
@@ -222,6 +223,10 @@ struct qdio_queue_perf_stat {
         unsigned int nr_sbal_total;
 };
 
+enum qdio_queue_irq_states {
+        QDIO_QUEUE_IRQS_DISABLED,
+};
+
 struct qdio_input_q {
         /* input buffer acknowledgement flag */
         int polling;
@@ -231,6 +236,10 @@ struct qdio_input_q {
         int ack_count;
         /* last time of noticing incoming data */
         u64 timestamp;
+        /* upper-layer polling flag */
+        unsigned long queue_irq_state;
+        /* callback to start upper-layer polling */
+        void (*queue_start_poll) (struct ccw_device *, int, unsigned long);
 };
 
 struct qdio_output_q {
@@ -399,6 +408,26 @@ static inline int multicast_outbound(struct qdio_q *q)
 #define sub_buf(bufnr, dec) \
         ((bufnr - dec) & QDIO_MAX_BUFFERS_MASK)
 
+#define queue_irqs_enabled(q) \
+        (test_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state) == 0)
+#define queue_irqs_disabled(q) \
+        (test_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state) != 0)
+
+#define TIQDIO_SHARED_IND 63
+
+/* device state change indicators */
+struct indicator_t {
+        u32 ind;        /* u32 because of compare-and-swap performance */
+        atomic_t count; /* use count, 0 or 1 for non-shared indicators */
+};
+
+extern struct indicator_t *q_indicators;
+
+static inline int shared_ind(struct qdio_irq *irq_ptr)
+{
+        return irq_ptr->dsci == &q_indicators[TIQDIO_SHARED_IND].ind;
+}
+
 /* prototypes for thin interrupt */
 void qdio_setup_thinint(struct qdio_irq *irq_ptr);
 int qdio_establish_thinint(struct qdio_irq *irq_ptr);
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c
index 6ce83f56d537..28868e7471a5 100644
--- a/drivers/s390/cio/qdio_debug.c
+++ b/drivers/s390/cio/qdio_debug.c
@@ -56,9 +56,16 @@ static int qstat_show(struct seq_file *m, void *v)
         seq_printf(m, "DSCI: %d nr_used: %d\n",
                    *(u32 *)q->irq_ptr->dsci, atomic_read(&q->nr_buf_used));
-        seq_printf(m, "ftc: %d last_move: %d\n", q->first_to_check, q->last_move);
-        seq_printf(m, "polling: %d ack start: %d ack count: %d\n",
-                   q->u.in.polling, q->u.in.ack_start, q->u.in.ack_count);
+        seq_printf(m, "ftc: %d last_move: %d\n",
+                   q->first_to_check, q->last_move);
+        if (q->is_input_q) {
+                seq_printf(m, "polling: %d ack start: %d ack count: %d\n",
+                           q->u.in.polling, q->u.in.ack_start,
+                           q->u.in.ack_count);
+                seq_printf(m, "IRQs disabled: %u\n",
+                           test_bit(QDIO_QUEUE_IRQS_DISABLED,
+                           &q->u.in.queue_irq_state));
+        }
         seq_printf(m, "SBAL states:\n");
         seq_printf(m, "|0 |8 |16 |24 |32 |40 |48 |56 63|\n");
@@ -113,22 +120,6 @@ static int qstat_show(struct seq_file *m, void *v)
         return 0;
 }
 
-static ssize_t qstat_seq_write(struct file *file, const char __user *buf,
-                               size_t count, loff_t *off)
-{
-        struct seq_file *seq = file->private_data;
-        struct qdio_q *q = seq->private;
-
-        if (!q)
-                return 0;
-        if (q->is_input_q)
-                xchg(q->irq_ptr->dsci, 1);
-        local_bh_disable();
-        tasklet_schedule(&q->tasklet);
-        local_bh_enable();
-        return count;
-}
-
 static int qstat_seq_open(struct inode *inode, struct file *filp)
 {
         return single_open(filp, qstat_show,
@@ -139,7 +130,6 @@ static const struct file_operations debugfs_fops = {
         .owner   = THIS_MODULE,
         .open    = qstat_seq_open,
         .read    = seq_read,
-        .write   = qstat_seq_write,
         .llseek  = seq_lseek,
         .release = single_release,
 };
@@ -166,7 +156,8 @@ static char *qperf_names[] = {
         "QEBSM eqbs",
         "QEBSM eqbs partial",
         "QEBSM sqbs",
-        "QEBSM sqbs partial"
+        "QEBSM sqbs partial",
+        "Discarded interrupts"
 };
 
 static int qperf_show(struct seq_file *m, void *v)
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 00520f9a7a8e..5fcfa7f9e9ef 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -884,8 +884,19 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
         if (unlikely(irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
                 return;
 
-        for_each_input_queue(irq_ptr, q, i)
-                tasklet_schedule(&q->tasklet);
+        for_each_input_queue(irq_ptr, q, i) {
+                if (q->u.in.queue_start_poll) {
+                        /* skip if polling is enabled or already in work */
+                        if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
+                                             &q->u.in.queue_irq_state)) {
+                                qperf_inc(q, int_discarded);
+                                continue;
+                        }
+                        q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr,
+                                                 q->irq_ptr->int_parm);
+                } else
+                        tasklet_schedule(&q->tasklet);
+        }
 
         if (!(irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED))
                 return;
@@ -1519,6 +1530,129 @@ int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
 }
 EXPORT_SYMBOL_GPL(do_QDIO);
 
+/**
+ * qdio_start_irq - process input buffers
+ * @cdev: associated ccw_device for the qdio subchannel
+ * @nr: input queue number
+ *
+ * Return codes
+ *   0 - success
+ *   1 - irqs not started since new data is available
+ */
+int qdio_start_irq(struct ccw_device *cdev, int nr)
+{
+        struct qdio_q *q;
+        struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+
+        if (!irq_ptr)
+                return -ENODEV;
+        q = irq_ptr->input_qs[nr];
+
+        WARN_ON(queue_irqs_enabled(q));
+
+        if (!shared_ind(q->irq_ptr))
+                xchg(q->irq_ptr->dsci, 0);
+
+        qdio_stop_polling(q);
+        clear_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state);
+
+        /*
+         * We need to check again to not lose initiative after
+         * resetting the ACK state.
+         */
+        if (!shared_ind(q->irq_ptr) && *q->irq_ptr->dsci)
+                goto rescan;
+        if (!qdio_inbound_q_done(q))
+                goto rescan;
+        return 0;
+
+rescan:
+        if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
+                             &q->u.in.queue_irq_state))
+                return 0;
+        else
+                return 1;
+
+}
+EXPORT_SYMBOL(qdio_start_irq);
+
+/**
+ * qdio_get_next_buffers - process input buffers
+ * @cdev: associated ccw_device for the qdio subchannel
+ * @nr: input queue number
+ * @bufnr: first filled buffer number
+ * @error: buffers are in error state
+ *
+ * Return codes
+ *   < 0 - error
+ *   = 0 - no new buffers found
+ *   > 0 - number of processed buffers
+ */
+int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr,
+                          int *error)
+{
+        struct qdio_q *q;
+        int start, end;
+        struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+
+        if (!irq_ptr)
+                return -ENODEV;
+        q = irq_ptr->input_qs[nr];
+        WARN_ON(queue_irqs_enabled(q));
+
+        qdio_sync_after_thinint(q);
+
+        /*
+         * The interrupt could be caused by a PCI request. Check the
+         * PCI capable outbound queues.
+         */
+        qdio_check_outbound_after_thinint(q);
+
+        if (!qdio_inbound_q_moved(q))
+                return 0;
+
+        /* Note: upper-layer MUST stop processing immediately here ... */
+        if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
+                return -EIO;
+
+        start = q->first_to_kick;
+        end = q->first_to_check;
+        *bufnr = start;
+        *error = q->qdio_error;
+
+        /* for the next time */
+        q->first_to_kick = end;
+        q->qdio_error = 0;
+        return sub_buf(end, start);
+}
+EXPORT_SYMBOL(qdio_get_next_buffers);
+
+/**
+ * qdio_stop_irq - disable interrupt processing for the device
+ * @cdev: associated ccw_device for the qdio subchannel
+ * @nr: input queue number
+ *
+ * Return codes
+ *   0 - interrupts were already disabled
+ *   1 - interrupts successfully disabled
+ */
+int qdio_stop_irq(struct ccw_device *cdev, int nr)
+{
+        struct qdio_q *q;
+        struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+
+        if (!irq_ptr)
+                return -ENODEV;
+        q = irq_ptr->input_qs[nr];
+
+        if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
+                             &q->u.in.queue_irq_state))
+                return 0;
+        else
+                return 1;
+}
+EXPORT_SYMBOL(qdio_stop_irq);
+
 static int __init init_QDIO(void)
 {
         int rc;
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index 34c7e4046df4..a13cf7ec64b2 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -161,6 +161,7 @@ static void setup_queues(struct qdio_irq *irq_ptr,
                 setup_queues_misc(q, irq_ptr, qdio_init->input_handler, i);
 
                 q->is_input_q = 1;
+                q->u.in.queue_start_poll = qdio_init->queue_start_poll;
                 setup_storage_lists(q, irq_ptr, input_sbal_array, i);
                 input_sbal_array += QDIO_MAX_BUFFERS_PER_Q;
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
index 8daf1b99f153..752dbee06af5 100644
--- a/drivers/s390/cio/qdio_thinint.c
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -25,24 +25,20 @@
  */
 #define TIQDIO_NR_NONSHARED_IND 63
 #define TIQDIO_NR_INDICATORS (TIQDIO_NR_NONSHARED_IND + 1)
-#define TIQDIO_SHARED_IND 63
 
 /* list of thin interrupt input queues */
 static LIST_HEAD(tiq_list);
 DEFINE_MUTEX(tiq_list_lock);
 
 /* adapter local summary indicator */
-static unsigned char *tiqdio_alsi;
+static u8 *tiqdio_alsi;
 
-/* device state change indicators */
-struct indicator_t {
-        u32 ind;        /* u32 because of compare-and-swap performance */
-        atomic_t count; /* use count, 0 or 1 for non-shared indicators */
-};
-static struct indicator_t *q_indicators;
+struct indicator_t *q_indicators;
 
 static int css_qdio_omit_svs;
 
+static u64 last_ai_time;
+
 static inline unsigned long do_clear_global_summary(void)
 {
         register unsigned long __fn asm("1") = 3;
@@ -116,59 +112,73 @@ void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
         }
 }
 
-static inline int shared_ind(struct qdio_irq *irq_ptr)
+static inline int shared_ind_used(void)
 {
-        return irq_ptr->dsci == &q_indicators[TIQDIO_SHARED_IND].ind;
+        return atomic_read(&q_indicators[TIQDIO_SHARED_IND].count);
 }
 
 /**
  * tiqdio_thinint_handler - thin interrupt handler for qdio
- * @ind: pointer to adapter local summary indicator
- * @drv_data: NULL
+ * @alsi: pointer to adapter local summary indicator
+ * @data: NULL
  */
-static void tiqdio_thinint_handler(void *ind, void *drv_data)
+static void tiqdio_thinint_handler(void *alsi, void *data)
 {
         struct qdio_q *q;
 
+        last_ai_time = S390_lowcore.int_clock;
+
         /*
          * SVS only when needed: issue SVS to benefit from iqdio interrupt
-         * avoidance (SVS clears adapter interrupt suppression overwrite)
+         * avoidance (SVS clears adapter interrupt suppression overwrite).
          */
         if (!css_qdio_omit_svs)
                 do_clear_global_summary();
 
-        /*
-         * reset local summary indicator (tiqdio_alsi) to stop adapter
-         * interrupts for now
-         */
-        xchg((u8 *)ind, 0);
+        /* reset local summary indicator */
+        if (shared_ind_used())
+                xchg(tiqdio_alsi, 0);
 
         /* protect tiq_list entries, only changed in activate or shutdown */
         rcu_read_lock();
 
         /* check for work on all inbound thinint queues */
-        list_for_each_entry_rcu(q, &tiq_list, entry)
+        list_for_each_entry_rcu(q, &tiq_list, entry) {
+
                 /* only process queues from changed sets */
-                if (*q->irq_ptr->dsci) {
-                        qperf_inc(q, adapter_int);
+                if (!*q->irq_ptr->dsci)
+                        continue;
 
+                if (q->u.in.queue_start_poll) {
+                        /* skip if polling is enabled or already in work */
+                        if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
+                                             &q->u.in.queue_irq_state)) {
+                                qperf_inc(q, int_discarded);
+                                continue;
+                        }
+
+                        /* avoid dsci clear here, done after processing */
+                        q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr,
+                                                 q->irq_ptr->int_parm);
+                } else {
                         /* only clear it if the indicator is non-shared */
                         if (!shared_ind(q->irq_ptr))
                                 xchg(q->irq_ptr->dsci, 0);
                         /*
-                         * don't call inbound processing directly since
-                         * that could starve other thinint queues
+                         * Call inbound processing but not directly
+                         * since that could starve other thinint queues.
                          */
                         tasklet_schedule(&q->tasklet);
                 }
-
+                qperf_inc(q, adapter_int);
+        }
         rcu_read_unlock();
 
         /*
-         * if we used the shared indicator clear it now after all queues
-         * were processed
+         * If the shared indicator was used clear it now after all queues
+         * were processed.
          */
-        if (atomic_read(&q_indicators[TIQDIO_SHARED_IND].count)) {
+        if (shared_ind_used()) {
                 xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 0);
 
                 /* prevent racing */