/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
*
* Copyright IBM Corporation, 2008
*
* Author: Paul E. McKenney <paulmck@linux.ibm.com>
*
* For detailed explanation of Read-Copy Update mechanism see -
* Documentation/RCU
*/
#ifndef __LINUX_TINY_H
#define __LINUX_TINY_H

#include <asm/param.h> /* for HZ */

struct rcu_gp_oldstate {
	unsigned long rgos_norm;
};

// Maximum number of rcu_gp_oldstate values corresponding to
// not-yet-completed RCU grace periods.
#define NUM_ACTIVE_RCU_POLL_FULL_OLDSTATE 2

/*
 * Are the two oldstate values the same? See the Tree RCU version for
 * docbook header.
 */
static inline bool same_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp1,
						   struct rcu_gp_oldstate *rgosp2)
{
	return rgosp1->rgos_norm == rgosp2->rgos_norm;
}

unsigned long get_state_synchronize_rcu(void);

static inline void get_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
{
	rgosp->rgos_norm = get_state_synchronize_rcu();
}

unsigned long start_poll_synchronize_rcu(void);

static inline void start_poll_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
{
	rgosp->rgos_norm = start_poll_synchronize_rcu();
}

bool poll_state_synchronize_rcu(unsigned long oldstate);

static inline bool poll_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
{
	return poll_state_synchronize_rcu(rgosp->rgos_norm);
}

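/*
 * Hedged usage sketch (not part of this header): the polled grace-period
 * API is typically used by recording a cookie and later checking whether
 * a full grace period has elapsed since the cookie was taken.  The type
 * and function names other than the RCU APIs themselves (for example,
 * "struct my_gc_candidate") are illustrative only.
 *
 *	struct my_gc_candidate {
 *		struct rcu_gp_oldstate rgos;
 *		void *data;
 *	};
 *
 *	// Retire side: snapshot the current grace-period state.
 *	// (start_poll_synchronize_rcu_full() would also force a grace
 *	// period to start if one is not already in progress.)
 *	static void my_retire(struct my_gc_candidate *p)
 *	{
 *		get_state_synchronize_rcu_full(&p->rgos);
 *	}
 *
 *	// Reclaim side: free only if a grace period has since elapsed.
 *	static bool my_try_reclaim(struct my_gc_candidate *p)
 *	{
 *		if (!poll_state_synchronize_rcu_full(&p->rgos))
 *			return false;	// Grace period not yet over.
 *		kfree(p->data);
 *		return true;
 *	}
 */
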
static inline void cond_synchronize_rcu(unsigned long oldstate)
{
	might_sleep();
}

static inline void cond_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp)
{
	cond_synchronize_rcu(rgosp->rgos_norm);
}

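/*
 * Hedged usage sketch (not part of this header): cond_synchronize_rcu()
 * is the blocking counterpart of the polling interfaces above.  A caller
 * records a cookie with get_state_synchronize_rcu() and, before reusing
 * the protected object, passes the cookie back in; the call returns
 * immediately if a grace period has already elapsed and otherwise waits
 * for one.  "struct my_object", its "cookie" field, and "my_recycle()"
 * are illustrative names only.
 *
 *	static void my_recycle(struct my_object *p)
 *	{
 *		// p->cookie was set earlier via get_state_synchronize_rcu().
 *		cond_synchronize_rcu(p->cookie);	// May sleep.
 *		memset(p, 0, sizeof(*p));		// Now safe to reuse.
 *	}
 */
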
static inline unsigned long start_poll_synchronize_rcu_expedited(void)
{
	return start_poll_synchronize_rcu();
}

static inline void start_poll_synchronize_rcu_expedited_full(struct rcu_gp_oldstate *rgosp)
{
	rgosp->rgos_norm = start_poll_synchronize_rcu_expedited();
}

static inline void cond_synchronize_rcu_expedited(unsigned long oldstate)
{
	cond_synchronize_rcu(oldstate);
}

static inline void cond_synchronize_rcu_expedited_full(struct rcu_gp_oldstate *rgosp)
{
	cond_synchronize_rcu_expedited(rgosp->rgos_norm);
}

extern void rcu_barrier(void);

static inline void synchronize_rcu_expedited(void)
{
	synchronize_rcu();
}

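/*
 * Hedged usage sketch (not part of this header): rcu_barrier() waits for
 * all callbacks already queued via call_rcu() to be invoked, which is the
 * usual way to make sure no callback still references module code or data
 * before teardown.  "my_module_exit" and "my_cache" are illustrative
 * names only.
 *
 *	static void __exit my_module_exit(void)
 *	{
 *		// Stop queueing new callbacks first ...
 *		rcu_barrier();		// ... then drain the ones in flight.
 *		kmem_cache_destroy(my_cache);
 *	}
 */
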
/*
 * Add one more declaration of kvfree() here.  It is not straightforward
 * to simply include <linux/mm.h>, where it is defined, because doing so
 * causes many compile errors.
 */
extern void kvfree(const void *addr);

static inline void __kvfree_call_rcu(struct rcu_head *head, void *ptr)
{
	if (head) {
		// Encode the byte offset of the rcu_head within the
		// enclosing object in place of a callback pointer; the
		// reclaim path recognizes such small "pointers" and
		// kvfree()s the enclosing object after a grace period.
		call_rcu(head, (rcu_callback_t) ((void *) head - ptr));
		return;
	}

	// kvfree_rcu(one_arg) call.
	might_sleep();
	synchronize_rcu();
	kvfree(ptr);
}

#ifdef CONFIG_KASAN_GENERIC
void kvfree_call_rcu(struct rcu_head *head, void *ptr);
#else
static inline void kvfree_call_rcu(struct rcu_head *head, void *ptr)
{
	__kvfree_call_rcu(head, ptr);
}
#endif

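/*
 * Hedged usage sketch (not part of this header): callers normally reach
 * kvfree_call_rcu() through the kvfree_rcu()/kfree_rcu() macros rather
 * than calling it directly.  "struct my_node", its fields, and the
 * function names are illustrative only.
 *
 *	struct my_node {
 *		int key;
 *		struct rcu_head rh;
 *	};
 *
 *	static void my_remove(struct my_node *p)
 *	{
 *		// Two-argument form: queues a callback, never sleeps.
 *		kvfree_rcu(p, rh);
 *	}
 *
 *	static void my_remove_blocking(void *p)
 *	{
 *		// Single-argument form (the "kvfree_rcu(one_arg)" path
 *		// above): may block for a full grace period.
 *		kvfree_rcu(p);
 *	}
 */
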
void rcu_qs(void);

static inline void rcu_softirq_qs(void)
{
	rcu_qs();
}

#define rcu_note_context_switch(preempt) \
	do { \
		rcu_qs(); \
		rcu_tasks_qs(current, (preempt)); \
	} while (0)

static inline int rcu_needs_cpu(void)
{
	return 0;
}

/*
 * Take advantage of the fact that there is only one CPU, which
 * allows us to ignore virtualization-based context switches.
 */
static inline void rcu_virt_note_context_switch(void) { }
static inline void rcu_cpu_stall_reset(void) { }
static inline int rcu_jiffies_till_stall_check(void) { return 21 * HZ; }
static inline void rcu_irq_exit_check_preempt(void) { }
static inline void exit_rcu(void) { }
static inline bool rcu_preempt_need_deferred_qs(struct task_struct *t)
{
	return false;
}
static inline void rcu_preempt_deferred_qs(struct task_struct *t) { }
void rcu_scheduler_starting(void);
static inline void rcu_end_inkernel_boot(void) { }
static inline bool rcu_inkernel_boot_has_ended(void) { return true; }
static inline bool rcu_is_watching(void) { return true; }
static inline void rcu_momentary_dyntick_idle(void) { }
static inline void kfree_rcu_scheduler_running(void) { }
static inline bool rcu_gp_might_be_stalled(void) { return false; }

/* Avoid RCU read-side critical sections leaking across. */
static inline void rcu_all_qs(void) { barrier(); }

/* RCUtree hotplug events */
#define rcutree_prepare_cpu      NULL
#define rcutree_online_cpu       NULL
#define rcutree_offline_cpu      NULL
#define rcutree_dead_cpu         NULL
#define rcutree_dying_cpu        NULL
static inline void rcu_cpu_starting(unsigned int cpu) { }

#endif /* __LINUX_TINY_H */