author      Steven Rostedt (Red Hat) <rostedt@goodmis.org>   2015-11-17 16:36:06 -0500
committer   Steven Rostedt <rostedt@goodmis.org>             2015-11-25 15:24:05 -0500
commit      4239c38fe0b3847e1e6d962c74b41b08ba0e2990 (patch)
tree        234c0d0be4a749956e455bf3f636f219d6e2389a /kernel
parent      70004986ffdf36d8bc787403af2571aeeef96595 (diff)
ring-buffer: Process commits whenever moving to a new page.
When crossing over to a new page, commit the current work. This will allow
readers to get data with less latency, and also simplifies the work to get
timestamps working for interrupted events.
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Diffstat (limited to 'kernel')
-rw-r--r--   kernel/trace/ring_buffer.c   7
1 files changed, 7 insertions, 0 deletions
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 631541a53baf..95181e36891a 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -2129,6 +2129,8 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
 		local_sub(length, &tail_page->write);
 }
 
+static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer);
+
 /*
  * This is the slow path, force gcc not to inline it.
  */
@@ -2220,6 +2222,11 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
 
 	rb_reset_tail(cpu_buffer, tail, info);
 
+	/* Commit what we have for now. */
+	rb_end_commit(cpu_buffer);
+	/* rb_end_commit() decs committing */
+	local_inc(&cpu_buffer->committing);
+
 	/* fail and let the caller try again */
 	return ERR_PTR(-EAGAIN);
 
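A note on the second hunk: the extra local_inc() right after the early rb_end_commit() is what keeps the commit nesting count balanced. rb_end_commit() drops the committing count (and, when it reaches zero, processes the pending commits so readers can see the data sooner, as the changelog says), but rb_move_tail() then returns -EAGAIN and the callers up the reserve path still expect to drop a reference of their own, so the count has to be re-taken here. Below is a minimal userspace sketch of that counter discipline, assuming plain ints in place of the kernel's local_t counters and trivial stand-ins (start_commit()/end_commit()/process_commits(), loosely analogous to rb_start_commit()/rb_end_commit()) for the real ring-buffer routines; it illustrates the balancing, not the actual implementation.

/*
 * Minimal userspace sketch (not kernel code) of the committing-counter
 * balancing used by the hunk above.  Plain ints stand in for local_t,
 * and process_commits() stands in for the work rb_end_commit() does
 * when the nesting count drops to zero.
 */
#include <stdio.h>

struct buf_model {
	int committing;	/* nesting count of writers inside a commit */
	int pending;	/* events written but not yet visible to readers */
};

static void process_commits(struct buf_model *b)
{
	printf("flushing %d pending event(s) to readers\n", b->pending);
	b->pending = 0;
}

/* analogue of taking a commit reference on entry to the reserve path */
static void start_commit(struct buf_model *b)
{
	b->committing++;
}

/* analogue of rb_end_commit(): drop the reference, flush at zero */
static void end_commit(struct buf_model *b)
{
	if (--b->committing == 0)
		process_commits(b);
}

/* analogue of the rb_move_tail() failure path in the patch */
static void move_tail_fails(struct buf_model *b)
{
	/* Commit what we have for now, so readers see it sooner. */
	end_commit(b);
	/*
	 * end_commit() dropped our reference; retake it so the caller's
	 * own end_commit() stays balanced, mirroring the
	 * local_inc(&cpu_buffer->committing) in the hunk above.
	 */
	start_commit(b);
}

int main(void)
{
	struct buf_model b = { .committing = 0, .pending = 0 };

	start_commit(&b);	/* writer enters the reserve path */
	b.pending = 3;		/* pretend three events were already written */
	move_tail_fails(&b);	/* page is full: flush now, retake the ref */
	end_commit(&b);		/* writer unwinds; count returns to zero */

	return 0;
}

In this toy run the pending events are flushed as soon as the "page" fills, rather than only when the outermost writer finishes, which is the latency improvement the changelog describes.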