author		Linus Torvalds <torvalds@linux-foundation.org>	2018-05-16 11:02:54 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-05-16 11:02:54 -0700
commit		9d38cd06c3e332093da2c486307395b302e2e31f (patch)
tree		c04652140f30d291a42053104b7673112d8424f8
parent		21b9f1c7e319f654de3b2574fe8d4e4114c9143f (diff)
parent		85f4f12d51397f1648e1f4350f77e24039b82d61 (diff)
Merge tag 'trace-v4.17-rc5-vsprintf' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace

Pull memory barrier fix from Steven Rostedt:
 "The memory barrier usage in updating the random ptr hash for %p in
  vsprintf is incorrect. Instead of adding the read memory barrier into
  vsprintf() which will cause a slight degradation to a commonly used
  function in the kernel just to solve a very unlikely race condition
  that can only happen at boot up, change the code from using a variable
  branch to a static_branch. Not only does this solve the race
  condition, it actually will improve the performance of vsprintf() by
  removing the conditional branch that is only needed at boot"

* tag 'trace-v4.17-rc5-vsprintf' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace:
  vsprintf: Replace memory barrier with static_key for random_ptr_key update
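The change described above reduces to a short pattern: a static key that starts out true ("key not yet filled"), a hot path that tests it with static_branch_unlikely() (a patched jump rather than a load plus conditional branch), and a workqueue that flips the key once random data is available, because static_branch_disable() may sleep while the random-ready callback can fire from interrupt context. The sketch below is illustrative only; the demo_* names are hypothetical and not part of the patch.

#include <linux/jump_label.h>
#include <linux/random.h>
#include <linux/siphash.h>
#include <linux/workqueue.h>

/* Hypothetical names; the real patch uses not_filled_random_ptr_key etc. */
static DEFINE_STATIC_KEY_TRUE(demo_key_not_ready);
static siphash_key_t demo_key __read_mostly;

static void demo_enable_workfn(struct work_struct *work)
{
	get_random_bytes(&demo_key, sizeof(demo_key));
	/* static_branch_disable() may sleep, so it must run preemptibly. */
	static_branch_disable(&demo_key_not_ready);
}
static DECLARE_WORK(demo_enable_work, demo_enable_workfn);

/* May be called from interrupt context: only defer the key flip. */
static void demo_random_ready(void)
{
	queue_work(system_unbound_wq, &demo_enable_work);
}

/* Hot path: out-of-line while the key is still true, a plain nop afterwards. */
static bool demo_key_is_ready(void)
{
	return !static_branch_unlikely(&demo_key_not_ready);
}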
-rw-r--r--	lib/vsprintf.c	26
1 file changed, 15 insertions, 11 deletions
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 30c0cb8cc9bc..23920c5ff728 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -1669,19 +1669,22 @@ char *pointer_string(char *buf, char *end, const void *ptr,
 	return number(buf, end, (unsigned long int)ptr, spec);
 }
 
-static bool have_filled_random_ptr_key __read_mostly;
+static DEFINE_STATIC_KEY_TRUE(not_filled_random_ptr_key);
 static siphash_key_t ptr_key __read_mostly;
 
-static void fill_random_ptr_key(struct random_ready_callback *unused)
+static void enable_ptr_key_workfn(struct work_struct *work)
 {
 	get_random_bytes(&ptr_key, sizeof(ptr_key));
-	/*
-	 * have_filled_random_ptr_key==true is dependent on get_random_bytes().
-	 * ptr_to_id() needs to see have_filled_random_ptr_key==true
-	 * after get_random_bytes() returns.
-	 */
-	smp_mb();
-	WRITE_ONCE(have_filled_random_ptr_key, true);
+	/* Needs to run from preemptible context */
+	static_branch_disable(&not_filled_random_ptr_key);
+}
+
+static DECLARE_WORK(enable_ptr_key_work, enable_ptr_key_workfn);
+
+static void fill_random_ptr_key(struct random_ready_callback *unused)
+{
+	/* This may be in an interrupt handler. */
+	queue_work(system_unbound_wq, &enable_ptr_key_work);
 }
 
 static struct random_ready_callback random_ready = {
@@ -1695,7 +1698,8 @@ static int __init initialize_ptr_random(void)
 	if (!ret) {
 		return 0;
 	} else if (ret == -EALREADY) {
-		fill_random_ptr_key(&random_ready);
+		/* This is in preemptible context */
+		enable_ptr_key_workfn(&enable_ptr_key_work);
 		return 0;
 	}
 
@@ -1709,7 +1713,7 @@ static char *ptr_to_id(char *buf, char *end, void *ptr, struct printf_spec spec)
 	unsigned long hashval;
 	const int default_width = 2 * sizeof(ptr);
 
-	if (unlikely(!have_filled_random_ptr_key)) {
+	if (static_branch_unlikely(&not_filled_random_ptr_key)) {
 		spec.field_width = default_width;
 		/* string length must be less than default_width */
 		return string(buf, end, "(ptrval)", spec);
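For orientation, a hypothetical out-of-tree caller of the %p path touched by the hunk above (module and symbol names are illustrative, not part of the patch): before enable_ptr_key_workfn() has run, the fallback string is printed; afterwards %p emits a siphash-based identifier rather than the raw address.

#include <linux/module.h>
#include <linux/printk.h>

static int __init ptrval_demo_init(void)
{
	int local;

	/*
	 * Early in boot this would print "(ptrval)"; once the key is
	 * ready (almost always before modules can load), %p prints a
	 * hashed identifier instead of the raw pointer value.
	 */
	pr_info("%%p gives: %p\n", &local);
	return 0;
}
module_init(ptrval_demo_init);

MODULE_LICENSE("GPL");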