author    Linus Torvalds <torvalds@linux-foundation.org>  2024-04-12 09:02:24 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2024-04-12 09:02:24 -0700
commit    5939d45155bb405ab212ef82992a8695b35f6662 (patch)
tree      dc3349058ebd12597928557585d59e05c09198de /kernel
parent    e00011a146cde3a86fff96cb643f9b7dd40166ac (diff)
parent    ffe3986fece696cf65e0ef99e74c75f848be8e30 (diff)
Merge tag 'trace-v6.9-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace
Pull tracing fixes from Steven Rostedt:

 - Fix the buffer_percent accounting, as it is dependent on three
   variables:

     1) pages_read    - number of subbuffers read
     2) pages_lost    - number of subbuffers lost due to overwrite
     3) pages_touched - number of subbuffers that a writer entered

   These three counters only increment; to know how many active pages
   there are on the buffer at any given time, pages_read and pages_lost
   are subtracted from pages_touched.

   But pages_touched was incremented whenever any writer went to the
   next subbuffer, even when it wasn't the only one to do so, so it was
   incremented more than it should be. That made the count of subbuffers
   that currently have content incorrect, which caused buffer_percent
   (which holds waiters until the ring buffer is filled to a given
   percentage) to wake them up early.

 - Fix warning of unused functions when PERF_EVENTS is not configured in

 - Replace a bad tab with a space in the Kconfig entry for
   FTRACE_RECORD_RECURSION_SIZE

 - Fix some kernel-doc function comments in the eventfs code

* tag 'trace-v6.9-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace:
  ring-buffer: Only update pages_touched when a new page is touched
  tracing: hide unused ftrace_event_id_fops
  tracing: Fix FTRACE_RECORD_RECURSION_SIZE Kconfig entry
  eventfs: Fix kernel-doc comments to functions
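For context, here is a minimal sketch of the accounting the first fix
restores. The helper name and signature are illustrative only, not the
kernel's actual API (the real checks live in kernel/trace/ring_buffer.c):

/*
 * Sketch of the accounting above: the three counters only ever
 * increase, so the number of subbuffers currently holding data
 * is their difference.
 */
static bool buffer_full_enough(long pages_touched, long pages_read,
			       long pages_lost, long nr_pages,
			       int buffer_percent)
{
	long dirty = pages_touched - pages_read - pages_lost;

	/* Wake waiters once 'buffer_percent' of the subbuffers are dirty */
	return dirty * 100 >= nr_pages * buffer_percent;
}

Over-counting pages_touched inflates dirty, so the threshold is crossed,
and waiters are woken, before the buffer is really that full.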
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/trace/Kconfig         |  2 +-
-rw-r--r--  kernel/trace/ring_buffer.c   |  6 +++---
-rw-r--r--  kernel/trace/trace_events.c  |  4 ++++
3 files changed, 8 insertions(+), 4 deletions(-)
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 61c541c36596..47345bf1d4a9 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -965,7 +965,7 @@ config FTRACE_RECORD_RECURSION
config FTRACE_RECORD_RECURSION_SIZE
int "Max number of recursed functions to record"
-	default	128
+	default 128
depends on FTRACE_RECORD_RECURSION
help
This defines the limit of number of functions that can be
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 25476ead681b..6511dc3a00da 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1393,7 +1393,6 @@ static void rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
- local_inc(&cpu_buffer->pages_touched);
/*
* Just make sure we have seen our old_write and synchronize
* with any interrupts that come in.
@@ -1430,8 +1429,9 @@ static void rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
*/
local_set(&next_page->page->commit, 0);
- /* Again, either we update tail_page or an interrupt does */
- (void)cmpxchg(&cpu_buffer->tail_page, tail_page, next_page);
+ /* Either we update tail_page or an interrupt does */
+ if (try_cmpxchg(&cpu_buffer->tail_page, &tail_page, next_page))
+ local_inc(&cpu_buffer->pages_touched);
}
}
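The fix works because try_cmpxchg() reports whether this context won the
race to advance the tail page (and on failure writes the current value
back through its old-value argument). Below is a standalone C11 sketch of
the same pattern, using atomic_compare_exchange_strong() in place of the
kernel's try_cmpxchg(); the names are illustrative, not kernel code:

#include <stdatomic.h>
#include <stdint.h>

/*
 * Several writers may race to move the tail forward, but only the
 * winner of the compare-and-exchange counts the new page as touched.
 */
void advance_tail(_Atomic(uintptr_t) *tail_page, uintptr_t expected,
		  uintptr_t next_page, _Atomic(long) *pages_touched)
{
	/* On failure, 'expected' is updated to the current tail, just
	 * as try_cmpxchg() updates its old-value argument. */
	if (atomic_compare_exchange_strong(tail_page, &expected, next_page))
		atomic_fetch_add(pages_touched, 1);
	/* Otherwise an interrupt (or another writer) already advanced
	 * the tail and did the increment; do nothing here. */
}

Incrementing only on success is what keeps pages_touched in step with the
number of subbuffers that actually gained a writer.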
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 7c364b87352e..52f75c36bbca 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -1670,6 +1670,7 @@ static int trace_format_open(struct inode *inode, struct file *file)
return 0;
}
+#ifdef CONFIG_PERF_EVENTS
static ssize_t
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
@@ -1684,6 +1685,7 @@ event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
}
+#endif
static ssize_t
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
@@ -2152,10 +2154,12 @@ static const struct file_operations ftrace_event_format_fops = {
.release = seq_release,
};
+#ifdef CONFIG_PERF_EVENTS
static const struct file_operations ftrace_event_id_fops = {
.read = event_id_read,
.llseek = default_llseek,
};
+#endif
static const struct file_operations ftrace_event_filter_fops = {
.open = tracing_open_file_tr,
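For reference, the #ifdef pattern in the trace_events.c hunks above is
the standard way to silence unused-function warnings when a config
option compiles out a function's only users. A minimal, self-contained
sketch, where CONFIG_PERF_EVENTS is just a plain macro standing in for
the Kconfig symbol:

#include <stdio.h>

#define CONFIG_PERF_EVENTS 1	/* comment out to mimic PERF_EVENTS=n */

#ifdef CONFIG_PERF_EVENTS
/* Compiled only when its sole user below exists, so there is no
 * -Wunused-function warning when the option is disabled. */
static int event_id(void)
{
	return 42;
}
#endif

int main(void)
{
#ifdef CONFIG_PERF_EVENTS
	printf("event id: %d\n", event_id());
#endif
	return 0;
}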