path: root/kernel/workqueue.c
author    Tejun Heo <tj@kernel.org>  2024-02-20 19:36:14 -1000
committer Tejun Heo <tj@kernel.org>  2024-02-20 19:36:14 -1000
commit cdc6e4b329bc82676886a758a940b2b6987c2109 (patch)
tree   70d128ad813c0eecc5590ffdd5774dcd1b7188ac /kernel/workqueue.c
parent c5140688d19a4579f7b01e6ca4b6e5f5d23d3d4d (diff)
workqueue: Reorganize flush and cancel[_sync] functions
They are currently a bit disorganized, with flush and cancel functions mixed together. Reorganize them so that flush functions come first, cancel next and cancel_sync last. This way, we won't have to add prototypes for internal functions for the planned disable/enable support.

This is pure code reorganization. No functional changes.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Lai Jiangshan <jiangshanlai@gmail.com>
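[Editor's aside on the prototype point: in C, a function must be declared before it is called, so defining internal helpers ahead of their public callers removes the need for separate forward declarations. A minimal sketch of that concern; the names below are hypothetical, not patch code:]

	/* Helper defined first: callers below need no forward prototype. */
	static bool __example_internal(int arg)
	{
		return arg != 0;
	}

	/*
	 * If this wrapper were defined first instead, the file would need
	 * the prototype: static bool __example_internal(int arg);
	 */
	bool example_public(int arg)
	{
		return __example_internal(arg);
	}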
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--  kernel/workqueue.c | 136
1 file changed, 68 insertions(+), 68 deletions(-)
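[For reference, a hedged usage sketch of the delayed-work APIs this patch moves around; my_handler, my_dwork and example_flush_cancel are made-up names, while the workqueue calls themselves are the stock kernel interfaces:]

	#include <linux/workqueue.h>

	static void my_handler(struct work_struct *work)
	{
		/* deferred work runs here */
	}
	static DECLARE_DELAYED_WORK(my_dwork, my_handler);

	static void example_flush_cancel(void)
	{
		/* Queue with a delay, then pull it forward and wait for it. */
		schedule_delayed_work(&my_dwork, msecs_to_jiffies(100));
		flush_delayed_work(&my_dwork);	/* true if it had to wait */

		/* Drop a pending instance; don't wait for a running one. */
		cancel_delayed_work(&my_dwork);

		/* Cancel and wait for any in-flight execution to finish. */
		cancel_delayed_work_sync(&my_dwork);
	}

[The patch only changes where these functions' definitions live in workqueue.c, not their behavior.]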
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 7e2af79bfa62..962061dca05c 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -4061,6 +4061,65 @@ bool flush_work(struct work_struct *work)
}
EXPORT_SYMBOL_GPL(flush_work);
+/**
+ * flush_delayed_work - wait for a dwork to finish executing the last queueing
+ * @dwork: the delayed work to flush
+ *
+ * Delayed timer is cancelled and the pending work is queued for
+ * immediate execution. Like flush_work(), this function only
+ * considers the last queueing instance of @dwork.
+ *
+ * Return:
+ * %true if flush_work() waited for the work to finish execution,
+ * %false if it was already idle.
+ */
+bool flush_delayed_work(struct delayed_work *dwork)
+{
+ local_irq_disable();
+ if (del_timer_sync(&dwork->timer))
+ __queue_work(dwork->cpu, dwork->wq, &dwork->work);
+ local_irq_enable();
+ return flush_work(&dwork->work);
+}
+EXPORT_SYMBOL(flush_delayed_work);
+
+/**
+ * flush_rcu_work - wait for a rwork to finish executing the last queueing
+ * @rwork: the rcu work to flush
+ *
+ * Return:
+ * %true if flush_rcu_work() waited for the work to finish execution,
+ * %false if it was already idle.
+ */
+bool flush_rcu_work(struct rcu_work *rwork)
+{
+ if (test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&rwork->work))) {
+ rcu_barrier();
+ flush_work(&rwork->work);
+ return true;
+ } else {
+ return flush_work(&rwork->work);
+ }
+}
+EXPORT_SYMBOL(flush_rcu_work);
+
+static bool __cancel_work(struct work_struct *work, bool is_dwork)
+{
+ unsigned long flags;
+ int ret;
+
+ do {
+ ret = try_to_grab_pending(work, is_dwork, &flags);
+ } while (unlikely(ret == -EAGAIN));
+
+ if (unlikely(ret < 0))
+ return false;
+
+ set_work_pool_and_clear_pending(work, get_work_pool_id(work));
+ local_irq_restore(flags);
+ return ret;
+}
+
struct cwt_wait {
wait_queue_entry_t wait;
struct work_struct *work;
@@ -4139,6 +4198,15 @@ static bool __cancel_work_sync(struct work_struct *work, bool is_dwork)
return ret;
}
+/*
+ * See cancel_delayed_work()
+ */
+bool cancel_work(struct work_struct *work)
+{
+ return __cancel_work(work, false);
+}
+EXPORT_SYMBOL(cancel_work);
+
/**
* cancel_work_sync - cancel a work and wait for it to finish
* @work: the work to cancel
@@ -4164,74 +4232,6 @@ bool cancel_work_sync(struct work_struct *work)
EXPORT_SYMBOL_GPL(cancel_work_sync);
/**
- * flush_delayed_work - wait for a dwork to finish executing the last queueing
- * @dwork: the delayed work to flush
- *
- * Delayed timer is cancelled and the pending work is queued for
- * immediate execution. Like flush_work(), this function only
- * considers the last queueing instance of @dwork.
- *
- * Return:
- * %true if flush_work() waited for the work to finish execution,
- * %false if it was already idle.
- */
-bool flush_delayed_work(struct delayed_work *dwork)
-{
- local_irq_disable();
- if (del_timer_sync(&dwork->timer))
- __queue_work(dwork->cpu, dwork->wq, &dwork->work);
- local_irq_enable();
- return flush_work(&dwork->work);
-}
-EXPORT_SYMBOL(flush_delayed_work);
-
-/**
- * flush_rcu_work - wait for a rwork to finish executing the last queueing
- * @rwork: the rcu work to flush
- *
- * Return:
- * %true if flush_rcu_work() waited for the work to finish execution,
- * %false if it was already idle.
- */
-bool flush_rcu_work(struct rcu_work *rwork)
-{
- if (test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&rwork->work))) {
- rcu_barrier();
- flush_work(&rwork->work);
- return true;
- } else {
- return flush_work(&rwork->work);
- }
-}
-EXPORT_SYMBOL(flush_rcu_work);
-
-static bool __cancel_work(struct work_struct *work, bool is_dwork)
-{
- unsigned long flags;
- int ret;
-
- do {
- ret = try_to_grab_pending(work, is_dwork, &flags);
- } while (unlikely(ret == -EAGAIN));
-
- if (unlikely(ret < 0))
- return false;
-
- set_work_pool_and_clear_pending(work, get_work_pool_id(work));
- local_irq_restore(flags);
- return ret;
-}
-
-/*
- * See cancel_delayed_work()
- */
-bool cancel_work(struct work_struct *work)
-{
- return __cancel_work(work, false);
-}
-EXPORT_SYMBOL(cancel_work);
-
-/**
* cancel_delayed_work - cancel a delayed work
* @dwork: delayed_work to cancel
*