author    Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2015-08-06 11:31:51 -0700
committer Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2015-09-20 21:16:20 -0700
commit    97c668b8e983b722e2ed765b98b05f644aff1b13 (patch)
tree      98ff9c8a31e2eb957703a0e547dc87e1c1178640 /kernel/rcu
parent    bce5fa12aad148e15efd9bc0015dc4898b6e723b (diff)
rcu: Rename qs_pending to core_needs_qs
An upcoming commit needs to invert the sense of the ->passed_quiesce rcu_data structure field, so this commit takes the opportunity to clarify things a bit by renaming ->qs_pending to ->core_needs_qs.

So if !rdp->core_needs_qs, then this CPU need not concern itself with quiescent states; in particular, it need not acquire its leaf rcu_node structure's ->lock to check. Otherwise, it needs to report the next quiescent state.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
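The check pattern the rename is meant to make obvious appears in rcu_check_quiescent_state() in the diff below. As a rough illustrative sketch only (simplified stand-in types and hypothetical helpers, not the kernel's actual rcu_data layout or reporting API), the core-side logic reduces to:

	/* Illustrative sketch -- simplified stand-ins for struct rcu_data
	 * and quiescent-state reporting; not the real kernel API. */
	#include <stdbool.h>

	struct rcu_data_sketch {
		bool core_needs_qs;	/* RCU core still waits for a QS from this CPU. */
		bool passed_quiesce;	/* This CPU has passed through a QS. */
	};

	/* Hypothetical helper standing in for rcu_report_qs_rdp(). */
	static void report_qs(struct rcu_data_sketch *rdp)
	{
		rdp->core_needs_qs = false;	/* Core no longer waits on this CPU. */
	}

	static void check_quiescent_state(struct rcu_data_sketch *rdp)
	{
		if (!rdp->core_needs_qs)
			return;			/* No QS owed: skip the ->lock entirely. */
		if (rdp->passed_quiesce)
			report_qs(rdp);		/* QS observed: report it upward. */
	}

With the old name, "!rdp->qs_pending" read as "no quiescent state is pending", which obscured the point that it is the RCU core that no longer needs anything from this CPU.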
Diffstat (limited to 'kernel/rcu')
-rw-r--r--  kernel/rcu/tree.c        | 14 +++++++-------
-rw-r--r--  kernel/rcu/tree.h        |  4 ++--
-rw-r--r--  kernel/rcu/tree_plugin.h |  2 +-
-rw-r--r--  kernel/rcu/tree_trace.c  |  4 ++--
4 files changed, 12 insertions(+), 12 deletions(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index d2cdcada6fe0..7c158ffc7769 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -1746,7 +1746,7 @@ static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpustart"));
rdp->passed_quiesce = 0;
rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr);
- rdp->qs_pending = !!(rnp->qsmask & rdp->grpmask);
+ rdp->core_needs_qs = !!(rnp->qsmask & rdp->grpmask);
zero_cpu_stall_ticks(rdp);
WRITE_ONCE(rdp->gpwrap, false);
}
@@ -2357,7 +2357,7 @@ rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
if ((rnp->qsmask & mask) == 0) {
raw_spin_unlock_irqrestore(&rnp->lock, flags);
} else {
- rdp->qs_pending = 0;
+ rdp->core_needs_qs = 0;
/*
* This GP can't end until cpu checks in, so all of our
@@ -2388,7 +2388,7 @@ rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
* Does this CPU still need to do its part for current grace period?
* If no, return and let the other CPUs do their part as well.
*/
- if (!rdp->qs_pending)
+ if (!rdp->core_needs_qs)
return;
/*
@@ -3828,10 +3828,10 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
/* Is the RCU core waiting for a quiescent state from this CPU? */
if (rcu_scheduler_fully_active &&
- rdp->qs_pending && !rdp->passed_quiesce &&
+ rdp->core_needs_qs && !rdp->passed_quiesce &&
rdp->rcu_qs_ctr_snap == __this_cpu_read(rcu_qs_ctr)) {
- rdp->n_rp_qs_pending++;
- } else if (rdp->qs_pending &&
+ rdp->n_rp_core_needs_qs++;
+ } else if (rdp->core_needs_qs &&
(rdp->passed_quiesce ||
rdp->rcu_qs_ctr_snap != __this_cpu_read(rcu_qs_ctr))) {
rdp->n_rp_report_qs++;
@@ -4157,7 +4157,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
rdp->completed = rnp->completed;
rdp->passed_quiesce = false;
rdp->rcu_qs_ctr_snap = per_cpu(rcu_qs_ctr, cpu);
- rdp->qs_pending = false;
+ rdp->core_needs_qs = false;
trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuonl"));
raw_spin_unlock_irqrestore(&rnp->lock, flags);
}
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index efe361c764ab..4a0f30676ba8 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -303,7 +303,7 @@ struct rcu_data {
unsigned long rcu_qs_ctr_snap;/* Snapshot of rcu_qs_ctr to check */
/* for rcu_all_qs() invocations. */
bool passed_quiesce; /* User-mode/idle loop etc. */
- bool qs_pending; /* Core waits for quiesc state. */
+ bool core_needs_qs; /* Core waits for quiesc state. */
bool beenonline; /* CPU online at least once. */
bool gpwrap; /* Possible gpnum/completed wrap. */
struct rcu_node *mynode; /* This CPU's leaf of hierarchy */
@@ -368,7 +368,7 @@ struct rcu_data {
/* 5) __rcu_pending() statistics. */
unsigned long n_rcu_pending; /* rcu_pending() calls since boot. */
- unsigned long n_rp_qs_pending;
+ unsigned long n_rp_core_needs_qs;
unsigned long n_rp_report_qs;
unsigned long n_rp_cb_ready;
unsigned long n_rp_cpu_needs_gp;
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 6f7500f9387c..e33b4f3b8e0a 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -619,7 +619,7 @@ static void rcu_preempt_check_callbacks(void)
return;
}
if (t->rcu_read_lock_nesting > 0 &&
- __this_cpu_read(rcu_data_p->qs_pending) &&
+ __this_cpu_read(rcu_data_p->core_needs_qs) &&
!__this_cpu_read(rcu_data_p->passed_quiesce))
t->rcu_read_unlock_special.b.need_qs = true;
}
diff --git a/kernel/rcu/tree_trace.c b/kernel/rcu/tree_trace.c
index 6fc4c5ff3bb5..4ac25f8520d6 100644
--- a/kernel/rcu/tree_trace.c
+++ b/kernel/rcu/tree_trace.c
@@ -123,7 +123,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
ulong2long(rdp->completed), ulong2long(rdp->gpnum),
rdp->passed_quiesce,
rdp->rcu_qs_ctr_snap == per_cpu(rcu_qs_ctr, rdp->cpu),
- rdp->qs_pending);
+ rdp->core_needs_qs);
seq_printf(m, " dt=%d/%llx/%d df=%lu",
atomic_read(&rdp->dynticks->dynticks),
rdp->dynticks->dynticks_nesting,
@@ -361,7 +361,7 @@ static void print_one_rcu_pending(struct seq_file *m, struct rcu_data *rdp)
cpu_is_offline(rdp->cpu) ? '!' : ' ',
rdp->n_rcu_pending);
seq_printf(m, "qsp=%ld rpq=%ld cbr=%ld cng=%ld ",
- rdp->n_rp_qs_pending,
+ rdp->n_rp_core_needs_qs,
rdp->n_rp_report_qs,
rdp->n_rp_cb_ready,
rdp->n_rp_cpu_needs_gp);