kernel/locking/rtmutex_common.h

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * RT Mutexes: blocking mutual exclusion locks with PI support
 *
 * started by Ingo Molnar and Thomas Gleixner:
 *
 *  Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *
 * This file contains the private data structure and API definitions.
 */

#ifndef __KERNEL_RTMUTEX_COMMON_H
#define __KERNEL_RTMUTEX_COMMON_H

#include <linux/debug_locks.h>
#include <linux/rtmutex.h>
#include <linux/sched/wake_q.h>


/*
 * This is a helper for the struct rt_mutex_waiter below. A waiter is queued
 * on two separate trees, and each tree needs its own copy of the sort keys
 * because the two trees are protected by different locks.
 *
 * @entry:		rbtree node to enqueue into the waiters tree
 * @prio:		Priority of the waiter
 * @deadline:		Deadline of the waiter if applicable
 *
 * See rt_waiter_node_less() and waiter_*_prio().
 */
struct rt_waiter_node {
	struct rb_node	entry;
	int		prio;
	u64		deadline;
};

/*
 * This is the control structure for tasks blocked on an rt_mutex.
 * It is allocated on the kernel stack of the blocked task.
 *
 * @tree:		node to enqueue into the mutex waiters tree
 * @pi_tree:		node to enqueue into the mutex owner waiters tree
 * @task:		task reference to the blocked task
 * @lock:		Pointer to the rt_mutex on which the waiter blocks
 * @wake_state:		Wakeup state to use (TASK_NORMAL or TASK_RTLOCK_WAIT)
 * @ww_ctx:		WW context pointer
 *
 * @tree is ordered by @lock->wait_lock
 * @pi_tree is ordered by rt_mutex_owner(@lock)->pi_lock
 */
struct rt_mutex_waiter {
	struct rt_waiter_node	tree;
	struct rt_waiter_node	pi_tree;
	struct task_struct	*task;
	struct rt_mutex_base	*lock;
	unsigned int		wake_state;
	struct ww_acquire_ctx	*ww_ctx;
};
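
/*
 * Illustrative only: code touching the two nodes is expected to hold the
 * corresponding lock, roughly along these lines (where owner is
 * rt_mutex_owner(lock); the real enqueue/dequeue helpers live in
 * kernel/locking/rtmutex.c):
 *
 *	lockdep_assert_held(&lock->wait_lock);
 *	rb_erase_cached(&waiter->tree.entry, &lock->waiters);
 *
 *	lockdep_assert_held(&owner->pi_lock);
 *	rb_erase_cached(&waiter->pi_tree.entry, &owner->pi_waiters);
 */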

/**
 * rt_wake_q_head - Wrapper around regular wake_q_head to support
 *		    "sleeping" spinlocks on RT
 * @head:		The regular wake_q_head for sleeping lock variants
 * @rtlock_task:	Task pointer for RT lock (spin/rwlock) wakeups
 */
struct rt_wake_q_head {
	struct wake_q_head	head;
	struct task_struct	*rtlock_task;
};

#define DEFINE_RT_WAKE_Q(name)						\
	struct rt_wake_q_head name = {					\
		.head		= WAKE_Q_HEAD_INITIALIZER(name.head),	\
		.rtlock_task	= NULL,					\
	}
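
/*
 * Illustrative only: the unlock slow path collects wakeups under
 * lock->wait_lock and performs them after the lock has been dropped,
 * roughly:
 *
 *	DEFINE_RT_WAKE_Q(wqh);
 *
 *	raw_spin_lock_irqsave(&lock->wait_lock, flags);
 *	// queue the top waiter into &wqh (regular or rtlock variant)
 *	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
 *	rt_mutex_postunlock(&wqh);
 */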

/*
 * PI-futex support (proxy locking functions, etc.):
 */
extern void rt_mutex_init_proxy_locked(struct rt_mutex_base *lock,
				       struct task_struct *proxy_owner);
extern void rt_mutex_proxy_unlock(struct rt_mutex_base *lock);
extern int __rt_mutex_start_proxy_lock(struct rt_mutex_base *lock,
				     struct rt_mutex_waiter *waiter,
				     struct task_struct *task);
extern int rt_mutex_start_proxy_lock(struct rt_mutex_base *lock,
				     struct rt_mutex_waiter *waiter,
				     struct task_struct *task);
extern int rt_mutex_wait_proxy_lock(struct rt_mutex_base *lock,
			       struct hrtimer_sleeper *to,
			       struct rt_mutex_waiter *waiter);
extern bool rt_mutex_cleanup_proxy_lock(struct rt_mutex_base *lock,
				 struct rt_mutex_waiter *waiter);
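
/*
 * Illustrative only: the PI-futex requeue path uses the proxy API roughly in
 * this order (error handling omitted): rt_mutex_start_proxy_lock() enqueues a
 * waiter on behalf of the task to be woken, rt_mutex_wait_proxy_lock() is
 * called by that task once it runs, and rt_mutex_cleanup_proxy_lock() removes
 * the waiter again if the lock was not acquired.
 */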

extern int rt_mutex_futex_trylock(struct rt_mutex_base *l);
extern int __rt_mutex_futex_trylock(struct rt_mutex_base *l);

extern void rt_mutex_futex_unlock(struct rt_mutex_base *lock);
extern bool __rt_mutex_futex_unlock(struct rt_mutex_base *lock,
				struct rt_wake_q_head *wqh);

extern void rt_mutex_postunlock(struct rt_wake_q_head *wqh);

/*
 * Must be guarded because this header is included from rcu/tree_plugin.h
 * unconditionally.
 */
#ifdef CONFIG_RT_MUTEXES
static inline int rt_mutex_has_waiters(struct rt_mutex_base *lock)
{
	return !RB_EMPTY_ROOT(&lock->waiters.rb_root);
}

/*
 * Lockless speculative check whether @waiter is still the top waiter on
 * @lock. This solely compares pointers and does not dereference the
 * leftmost entry, which might be about to vanish.
 */
static inline bool rt_mutex_waiter_is_top_waiter(struct rt_mutex_base *lock,
						 struct rt_mutex_waiter *waiter)
{
	struct rb_node *leftmost = rb_first_cached(&lock->waiters);

	return rb_entry(leftmost, struct rt_mutex_waiter, tree.entry) == waiter;
}

static inline struct rt_mutex_waiter *rt_mutex_top_waiter(struct rt_mutex_base *lock)
{
	struct rb_node *leftmost = rb_first_cached(&lock->waiters);
	struct rt_mutex_waiter *w = NULL;

	lockdep_assert_held(&lock->wait_lock);

	if (leftmost) {
		w = rb_entry(leftmost, struct rt_mutex_waiter, tree.entry);
		BUG_ON(w->lock != lock);
	}
	return w;
}

static inline int task_has_pi_waiters(struct task_struct *p)
{
	return !RB_EMPTY_ROOT(&p->pi_waiters.rb_root);
}

static inline struct rt_mutex_waiter *task_top_pi_waiter(struct task_struct *p)
{
	lockdep_assert_held(&p->pi_lock);

	return rb_entry(p->pi_waiters.rb_leftmost, struct rt_mutex_waiter,
			pi_tree.entry);
}

#define RT_MUTEX_HAS_WAITERS	1UL

static inline struct task_struct *rt_mutex_owner(struct rt_mutex_base *lock)
{
	unsigned long owner = (unsigned long) READ_ONCE(lock->owner);

	return (struct task_struct *) (owner & ~RT_MUTEX_HAS_WAITERS);
}
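
/*
 * Illustrative only: bit 0 of lock->owner carries RT_MUTEX_HAS_WAITERS, so an
 * owner task_struct at, say, 0xffff888012345678 is stored as
 * 0xffff888012345679 once a waiter is enqueued; rt_mutex_owner() masks the
 * flag back out before returning the pointer.
 */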

/*
 * Constants for rt mutex functions which have a selectable deadlock
 * detection.
 *
 * RT_MUTEX_MIN_CHAINWALK:	Stops the lock chain walk when there are
 *				no further PI adjustments to be made.
 *
 * RT_MUTEX_FULL_CHAINWALK:	Invoke deadlock detection with a full
 *				walk of the lock chain.
 */
enum rtmutex_chainwalk {
	RT_MUTEX_MIN_CHAINWALK,
	RT_MUTEX_FULL_CHAINWALK,
};
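
/*
 * Note: with CONFIG_DEBUG_RT_MUTEXES the slow paths typically upgrade to
 * RT_MUTEX_FULL_CHAINWALK so the deadlock detector runs on every chain walk;
 * non-debug builds pass RT_MUTEX_MIN_CHAINWALK and stop the walk early.
 */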

static inline void __rt_mutex_base_init(struct rt_mutex_base *lock)
{
	raw_spin_lock_init(&lock->wait_lock);
	lock->waiters = RB_ROOT_CACHED;
	lock->owner = NULL;
}

/* Debug functions */
static inline void debug_rt_mutex_unlock(struct rt_mutex_base *lock)
{
	if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES))
		DEBUG_LOCKS_WARN_ON(rt_mutex_owner(lock) != current);
}

static inline void debug_rt_mutex_proxy_unlock(struct rt_mutex_base *lock)
{
	if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES))
		DEBUG_LOCKS_WARN_ON(!rt_mutex_owner(lock));
}

static inline void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter)
{
	if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES))
		memset(waiter, 0x11, sizeof(*waiter));
}

static inline void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter)
{
	if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES))
		memset(waiter, 0x22, sizeof(*waiter));
}
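
/*
 * The 0x11/0x22 poison patterns above are only written with
 * CONFIG_DEBUG_RT_MUTEXES enabled; they make a waiter that is used before
 * init or after free easy to spot in a crash dump.
 */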

static inline void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter)
{
	debug_rt_mutex_init_waiter(waiter);
	RB_CLEAR_NODE(&waiter->pi_tree.entry);
	RB_CLEAR_NODE(&waiter->tree.entry);
	waiter->wake_state = TASK_NORMAL;
	waiter->task = NULL;
}

static inline void rt_mutex_init_rtlock_waiter(struct rt_mutex_waiter *waiter)
{
	rt_mutex_init_waiter(waiter);
	waiter->wake_state = TASK_RTLOCK_WAIT;
}
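
/*
 * Illustrative only: blocking callers keep the waiter on their own stack for
 * the duration of the sleep, roughly:
 *
 *	struct rt_mutex_waiter waiter;
 *
 *	rt_mutex_init_waiter(&waiter);
 *	// enqueue under lock->wait_lock, schedule() until the lock is
 *	// acquired or the wait is aborted, then dequeue
 *	debug_rt_mutex_free_waiter(&waiter);
 */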

#else /* CONFIG_RT_MUTEXES */
/* Used in rcu/tree_plugin.h */
static inline struct task_struct *rt_mutex_owner(struct rt_mutex_base *lock)
{
	return NULL;
}
#endif  /* !CONFIG_RT_MUTEXES */

#endif /* __KERNEL_RTMUTEX_COMMON_H */