From 5424d43e4daddf71aaa60d6565c8816af9150adf Mon Sep 17 00:00:00 2001
From: Mark Brown
Date: Wed, 10 Dec 2014 17:40:53 +0000
Subject: spi: Move queue data structure initialisation to main master init

Since most devices now do use the standard queue, and in order to avoid
initialisation ordering issues being introduced by further refactorings
to improve performance, move the initialisation of the queue and the
lock for it to the main master allocation.

Signed-off-by: Mark Brown
---
 drivers/spi/spi.c | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

(limited to 'drivers/spi/spi.c')

diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index da7e6225b8f6..b81ccdb1bc16 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -989,9 +989,6 @@ static int spi_init_queue(struct spi_master *master)
 {
 	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
 
-	INIT_LIST_HEAD(&master->queue);
-	spin_lock_init(&master->queue_lock);
-
 	master->running = false;
 	master->busy = false;
 
@@ -1595,6 +1592,8 @@ int spi_register_master(struct spi_master *master)
 		dynamic = 1;
 	}
 
+	INIT_LIST_HEAD(&master->queue);
+	spin_lock_init(&master->queue_lock);
 	spin_lock_init(&master->bus_lock_spinlock);
 	mutex_init(&master->bus_lock_mutex);
 	master->bus_lock_flag = 0;
--
cgit v1.2.3

From 983aee5d7090cf12b624f18533777caa09d067b1 Mon Sep 17 00:00:00 2001
From: Mark Brown
Date: Tue, 9 Dec 2014 19:46:56 +0000
Subject: spi: Check to see if the device is processing a message before we idle

cur_msg is updated under the queue lock and holds the message we are
currently processing.  Since currently we only ever do removals in the
pump kthread it doesn't matter in what order we do things, but we want
to be able to push things out from the submitting thread, so pull the
check to see if we're currently handling a message before we check to
see if the queue is idle.

Signed-off-by: Mark Brown
---
 drivers/spi/spi.c | 15 +++++++++------
 1 file changed, 9 insertions(+), 6 deletions(-)

(limited to 'drivers/spi/spi.c')

diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index b81ccdb1bc16..0bc752d17be5 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -891,8 +891,16 @@ static void spi_pump_messages(struct kthread_work *work)
 	bool was_busy = false;
 	int ret;
 
-	/* Lock queue and check for queue work */
+	/* Lock queue */
 	spin_lock_irqsave(&master->queue_lock, flags);
+
+	/* Make sure we are not already running a message */
+	if (master->cur_msg) {
+		spin_unlock_irqrestore(&master->queue_lock, flags);
+		return;
+	}
+
+	/* Check if the queue is idle */
 	if (list_empty(&master->queue) || !master->running) {
 		if (!master->busy) {
 			spin_unlock_irqrestore(&master->queue_lock, flags);
@@ -916,11 +924,6 @@ static void spi_pump_messages(struct kthread_work *work)
 		return;
 	}
 
-	/* Make sure we are not already running a message */
-	if (master->cur_msg) {
-		spin_unlock_irqrestore(&master->queue_lock, flags);
-		return;
-	}
 	/* Extract head of queue */
 	master->cur_msg =
 		list_first_entry(&master->queue, struct spi_message, queue);
--
cgit v1.2.3
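The two patches above and the two that follow all revolve around a
handful of queue-related fields in struct spi_master.  For reference,
the excerpt below is an abridged sketch of those fields; see
include/linux/spi/spi.h for the real definition, the comments are
annotations for this series rather than the kernel-doc text, and the
bool idling flag used by the next patch is added to the header by that
patch, so it does not show up in this spi.c-only log.

struct spi_master {
	/* ... */
	bool			queued;		/* standard message queue in use? */

	struct kthread_worker	kworker;	/* message pump worker */
	struct task_struct	*kworker_task;
	struct kthread_work	pump_messages;	/* runs spi_pump_messages() */

	spinlock_t		queue_lock;	/* protects the fields below */
	struct list_head	queue;		/* messages waiting to be transferred */
	struct spi_message	*cur_msg;	/* message currently being processed */
	bool			busy;		/* pump has claimed the hardware */
	bool			running;	/* queue is accepting new messages */
	/* ... */
};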
From 0461a4149836c792d186027c8c859637a4cfb11a Mon Sep 17 00:00:00 2001
From: Mark Brown
Date: Tue, 9 Dec 2014 21:38:05 +0000
Subject: spi: Pump transfers inside calling context for spi_sync()

If we are using the standard SPI message pump (which all drivers should
be transitioning over to) then special case the message enqueue and,
instead of starting the worker thread to push messages to the hardware,
do so in the context of the caller if the controller is idle.

This avoids a context switch in the common case where the controller
has a single user in a single thread; for short PIO transfers there may
be no need to context switch away from the calling context to complete
the transfer.

The code is a bit more complex than is desirable, in part due to the
need to handle drivers not using the standard queue and in part due to
handling the various combinations of bus locking and asynchronous
submission in interrupt context.

It is still suboptimal since it will still wake the message pump for
each transfer in order to schedule idling of the hardware, and if
multiple contexts are using the controller simultaneously a caller may
end up pumping a message for some random other thread rather than for
itself; if the thread ends up deferring due to another context idling
the hardware then it will just busy wait.

It can, however, have the benefit of aggregating power up and down of
the hardware when a caller performs a series of transfers back to back
without any need for the use of spi_async().

Signed-off-by: Mark Brown
---
 drivers/spi/spi.c | 66 ++++++++++++++++++++++++++++++++++++++++++++++++-------
 1 file changed, 58 insertions(+), 8 deletions(-)

(limited to 'drivers/spi/spi.c')

diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 0bc752d17be5..e1bf2579b9c0 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -882,6 +882,9 @@ EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
  * needs processing and if so call out to the driver to initialize hardware
  * and transfer each message.
  *
+ * Note that it is called both from the kthread itself and also from
+ * inside spi_sync(); the queue extraction handling at the top of the
+ * function should deal with this safely.
  */
 static void spi_pump_messages(struct kthread_work *work)
 {
@@ -900,6 +903,13 @@ static void spi_pump_messages(struct kthread_work *work)
 		return;
 	}
 
+	/* If another context is idling the device then defer */
+	if (master->idling) {
+		queue_kthread_work(&master->kworker, &master->pump_messages);
+		spin_unlock_irqrestore(&master->queue_lock, flags);
+		return;
+	}
+
 	/* Check if the queue is idle */
 	if (list_empty(&master->queue) || !master->running) {
 		if (!master->busy) {
@@ -907,7 +917,9 @@ static void spi_pump_messages(struct kthread_work *work)
 			return;
 		}
 		master->busy = false;
+		master->idling = true;
 		spin_unlock_irqrestore(&master->queue_lock, flags);
+
 		kfree(master->dummy_rx);
 		master->dummy_rx = NULL;
 		kfree(master->dummy_tx);
@@ -921,6 +933,10 @@ static void spi_pump_messages(struct kthread_work *work)
 			pm_runtime_put_autosuspend(master->dev.parent);
 		}
 		trace_spi_master_idle(master);
+
+		spin_lock_irqsave(&master->queue_lock, flags);
+		master->idling = false;
+		spin_unlock_irqrestore(&master->queue_lock, flags);
 		return;
 	}
 
@@ -1161,12 +1177,9 @@ static int spi_destroy_queue(struct spi_master *master)
 	return 0;
 }
 
-/**
- * spi_queued_transfer - transfer function for queued transfers
- * @spi: spi device which is requesting transfer
- * @msg: spi message which is to handled is queued to driver queue
- */
-static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
+static int __spi_queued_transfer(struct spi_device *spi,
+				 struct spi_message *msg,
+				 bool need_pump)
 {
 	struct spi_master *master = spi->master;
 	unsigned long flags;
@@ -1181,13 +1194,23 @@ static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
 	msg->status = -EINPROGRESS;
 	list_add_tail(&msg->queue, &master->queue);
-	if (!master->busy)
+	if (!master->busy && need_pump)
 		queue_kthread_work(&master->kworker, &master->pump_messages);
 
 	spin_unlock_irqrestore(&master->queue_lock, flags);
 	return 0;
 }
 
+/**
+ * spi_queued_transfer - transfer function for queued transfers
+ * @spi: spi device which is requesting transfer
+ * @msg: spi message which is to handled is queued to driver queue
+ */
+static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
+{
+	return __spi_queued_transfer(spi, msg, true);
+}
+
 static int spi_master_initialize_queue(struct spi_master *master)
 {
 	int ret;
 
@@ -2102,19 +2125,46 @@ static int __spi_sync(struct spi_device *spi, struct spi_message *message,
 	DECLARE_COMPLETION_ONSTACK(done);
 	int status;
 	struct spi_master *master = spi->master;
+	unsigned long flags;
+
+	status = __spi_validate(spi, message);
+	if (status != 0)
+		return status;
 
 	message->complete = spi_complete;
 	message->context = &done;
+	message->spi = spi;
 
 	if (!bus_locked)
 		mutex_lock(&master->bus_lock_mutex);
 
-	status = spi_async_locked(spi, message);
+	/* If we're not using the legacy transfer method then we will
+	 * try to transfer in the calling context so special case.
+	 * This code would be less tricky if we could remove the
+	 * support for driver implemented message queues.
+	 */
+	if (master->transfer == spi_queued_transfer) {
+		spin_lock_irqsave(&master->bus_lock_spinlock, flags);
+
+		trace_spi_message_submit(message);
+
+		status = __spi_queued_transfer(spi, message, false);
+
+		spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
+	} else {
+		status = spi_async_locked(spi, message);
+	}
 
 	if (!bus_locked)
 		mutex_unlock(&master->bus_lock_mutex);
 
 	if (status == 0) {
+		/* Push out the messages in the calling context if we
+		 * can.
+		 */
+		if (master->transfer == spi_queued_transfer)
+			spi_pump_messages(&master->pump_messages);
+
 		wait_for_completion(&done);
 		status = message->status;
 	}
--
cgit v1.2.3
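Since the interesting change above is buried in a fairly large hunk,
here is a condensed sketch of the spi_sync() fast path it creates,
assuming a controller that uses the standard queue; the function name
is invented for illustration, and the __spi_validate() call, tracing,
the bus_lock_mutex handling and the legacy transfer() fallback are all
trimmed (see the hunk above for the real code).

static int spi_sync_fast_path_sketch(struct spi_device *spi,
				     struct spi_message *message)
{
	struct spi_master *master = spi->master;
	DECLARE_COMPLETION_ONSTACK(done);
	unsigned long flags;
	int status;

	message->complete = spi_complete;	/* just completes &done */
	message->context = &done;
	message->spi = spi;

	/* Enqueue without waking the pump kthread (need_pump == false) */
	spin_lock_irqsave(&master->bus_lock_spinlock, flags);
	status = __spi_queued_transfer(spi, message, false);
	spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);

	if (status == 0) {
		/* Push the queue out in the calling context... */
		spi_pump_messages(&master->pump_messages);

		/* ...and wait for the controller driver to finalize it */
		wait_for_completion(&done);
		status = message->status;
	}

	return status;
}

As the commit message notes, the pump kthread is still woken once per
message afterwards, but only to schedule idling of the hardware rather
than to do the transfer itself.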
From fc9e0f71f2d7ea43fa3ba0bea68586d1462cb5a7 Mon Sep 17 00:00:00 2001
From: Mark Brown
Date: Wed, 10 Dec 2014 13:46:33 +0000
Subject: spi: Only idle the message pump in the worker kthread

In order to avoid the situation where the kthread is waiting for
another context to make the hardware idle, let the message pump know if
it's being called from the worker thread context and, if it isn't,
defer to the worker thread instead of idling the hardware immediately.
This will ensure that if this situation happens we block rather than
busy-wait.

Signed-off-by: Mark Brown
---
 drivers/spi/spi.c | 32 ++++++++++++++++++++++++++------
 1 file changed, 26 insertions(+), 6 deletions(-)

(limited to 'drivers/spi/spi.c')

diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index e1bf2579b9c0..3ac188fc36f5 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -875,8 +875,9 @@ void spi_finalize_current_transfer(struct spi_master *master)
 EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
 
 /**
- * spi_pump_messages - kthread work function which processes spi message queue
- * @work: pointer to kthread work struct contained in the master struct
+ * __spi_pump_messages - function which processes spi message queue
+ * @master: master to process queue for
+ * @in_kthread: true if we are in the context of the message pump thread
  *
  * This function checks if there is any spi message in the queue that
  * needs processing and if so call out to the driver to initialize hardware
@@ -886,10 +887,8 @@ EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
  * inside spi_sync(); the queue extraction handling at the top of the
  * function should deal with this safely.
  */
-static void spi_pump_messages(struct kthread_work *work)
+static void __spi_pump_messages(struct spi_master *master, bool in_kthread)
 {
-	struct spi_master *master =
-		container_of(work, struct spi_master, pump_messages);
 	unsigned long flags;
 	bool was_busy = false;
 	int ret;
@@ -916,6 +915,15 @@ static void spi_pump_messages(struct kthread_work *work)
 			spin_unlock_irqrestore(&master->queue_lock, flags);
 			return;
 		}
+
+		/* Only do teardown in the thread */
+		if (!in_kthread) {
+			queue_kthread_work(&master->kworker,
+					   &master->pump_messages);
+			spin_unlock_irqrestore(&master->queue_lock, flags);
+			return;
+		}
+
 		master->busy = false;
 		master->idling = true;
 		spin_unlock_irqrestore(&master->queue_lock, flags);
@@ -1004,6 +1012,18 @@ static void spi_pump_messages(struct kthread_work *work)
 	}
 }
 
+/**
+ * spi_pump_messages - kthread work function which processes spi message queue
+ * @work: pointer to kthread work struct contained in the master struct
+ */
+static void spi_pump_messages(struct kthread_work *work)
+{
+	struct spi_master *master =
+		container_of(work, struct spi_master, pump_messages);
+
+	__spi_pump_messages(master, true);
+}
+
 static int spi_init_queue(struct spi_master *master)
 {
 	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
@@ -2163,7 +2183,7 @@ static int __spi_sync(struct spi_device *spi, struct spi_message *message,
 		 * can.
 		 */
 		if (master->transfer == spi_queued_transfer)
-			spi_pump_messages(&master->pump_messages);
+			__spi_pump_messages(master, false);
 
 		wait_for_completion(&done);
 		status = message->status;
--
cgit v1.2.3
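Nothing changes for SPI client drivers at the API level; the series
only shortens the path taken by synchronous transfers.  As a usage
illustration, a hypothetical fragment like the one below (the device,
register layout, values and helper names are invented) now has its
back-to-back spi_sync() calls pumped largely in the calling thread,
with the worker kthread involved only to idle the hardware afterwards,
provided the controller driver uses the standard message queue and the
transfers are short PIO ones.

#include <linux/spi/spi.h>

/* Hypothetical helper: write one 8-bit register of an imaginary device */
static int example_write_reg(struct spi_device *spi, u8 reg, u8 val)
{
	/*
	 * A stack buffer keeps the sketch short; real drivers should use
	 * DMA-safe (e.g. kmalloc'd) buffers for SPI transfers.
	 */
	u8 buf[2] = { reg, val };
	struct spi_transfer xfer = {
		.tx_buf	= buf,
		.len	= sizeof(buf),
	};
	struct spi_message msg;

	spi_message_init(&msg);
	spi_message_add_tail(&xfer, &msg);

	/* Short synchronous transfer: now typically pumped in this context */
	return spi_sync(spi, &msg);
}

/*
 * Two back-to-back writes; with this series the hardware power up and
 * down can be aggregated across them without resorting to spi_async().
 */
static int example_configure(struct spi_device *spi)
{
	int ret;

	ret = example_write_reg(spi, 0x01, 0x80);
	if (ret)
		return ret;

	return example_write_reg(spi, 0x02, 0x3f);
}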