| author | Jiri Kosina <jkosina@suse.cz> | 2022-01-10 09:56:57 +0100 |
| --- | --- | --- |
| committer | Jiri Kosina <jkosina@suse.cz> | 2022-01-10 09:56:57 +0100 |
| commit | 3551a3ff8229e15d2a4b47b8234923bc72da65ef (patch) | |
| tree | a62465753397b0268529a5d0b9ab43a8840069ad /include/linux/wait.h | |
| parent | 906095af85e8b2e53ee9f8c50b3dff365aa09df8 (diff) | |
| parent | 33a5c2793451770cb6dcf0cc35c76cfd4b045513 (diff) | |
Merge branch 'for-5.17/letsketch' into for-linus
- new driver to support the LetSketch device (Hans de Goede)
Diffstat (limited to 'include/linux/wait.h')
-rw-r--r-- | include/linux/wait.h | 26 |
1 file changed, 26 insertions, 0 deletions
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 2d0df57c9902..851e07da2583 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -217,6 +217,7 @@ void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void
 void __wake_up_locked_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
 void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr);
 void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode);
+void __wake_up_pollfree(struct wait_queue_head *wq_head);
 
 #define wake_up(x)			__wake_up(x, TASK_NORMAL, 1, NULL)
 #define wake_up_nr(x, nr)		__wake_up(x, TASK_NORMAL, nr, NULL)
@@ -245,6 +246,31 @@ void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode);
 #define wake_up_interruptible_sync_poll_locked(x, m)				\
 	__wake_up_locked_sync_key((x), TASK_INTERRUPTIBLE, poll_to_key(m))
 
+/**
+ * wake_up_pollfree - signal that a polled waitqueue is going away
+ * @wq_head: the wait queue head
+ *
+ * In the very rare cases where a ->poll() implementation uses a waitqueue whose
+ * lifetime is tied to a task rather than to the 'struct file' being polled,
+ * this function must be called before the waitqueue is freed so that
+ * non-blocking polls (e.g. epoll) are notified that the queue is going away.
+ *
+ * The caller must also RCU-delay the freeing of the wait_queue_head, e.g. via
+ * an explicit synchronize_rcu() or call_rcu(), or via SLAB_TYPESAFE_BY_RCU.
+ */
+static inline void wake_up_pollfree(struct wait_queue_head *wq_head)
+{
+	/*
+	 * For performance reasons, we don't always take the queue lock here.
+	 * Therefore, we might race with someone removing the last entry from
+	 * the queue, and proceed while they still hold the queue lock.
+	 * However, rcu_read_lock() is required to be held in such cases, so we
+	 * can safely proceed with an RCU-delayed free.
+	 */
+	if (waitqueue_active(wq_head))
+		__wake_up_pollfree(wq_head);
+}
+
 #define ___wait_cond_timeout(condition)						\
 ({										\
 	bool __cond = (condition);						\
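
For context on how a caller of the new helper might look, below is a minimal, hypothetical sketch. The `foo_ctx` structure and `foo_*` functions are illustrative only and are not part of this patch; they show a ->poll() implementation that waits on a queue embedded in its own context object, and a teardown path that calls wake_up_pollfree() before RCU-delaying the free with call_rcu(), as the kerneldoc above requires.

```c
/* Illustrative only -- "foo" is a made-up driver context, not from this patch. */
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>

struct foo_ctx {
	struct wait_queue_head wq;	/* waitqueue handed out via ->poll() */
	struct rcu_head rcu;
	bool data_ready;
};

static __poll_t foo_poll(struct file *file, poll_table *wait)
{
	struct foo_ctx *ctx = file->private_data;

	/* Register the caller on a waitqueue whose lifetime is not the file's. */
	poll_wait(file, &ctx->wq, wait);

	return ctx->data_ready ? EPOLLIN | EPOLLRDNORM : 0;
}

static void foo_ctx_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct foo_ctx, rcu));
}

/* Teardown path that may run while epoll instances still reference ctx->wq. */
static void foo_ctx_destroy(struct foo_ctx *ctx)
{
	/*
	 * Tell non-blocking pollers (e.g. epoll) that the waitqueue is about
	 * to disappear, so they drop their references to it.
	 */
	wake_up_pollfree(&ctx->wq);

	/*
	 * RCU-delay the actual free, as wake_up_pollfree() requires;
	 * synchronize_rcu() followed by kfree() would also work.
	 */
	call_rcu(&ctx->rcu, foo_ctx_free_rcu);
}
```

The waitqueue_active() check in the new helper keeps the common case (no waiters) lock-free; the mandatory RCU-delayed free is what makes that lockless check safe against a waiter racing with queue removal, as the inline comment in the patch explains.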