author     Robert Richter <robert.richter@amd.com>   2009-01-08 14:27:34 +0100
committer  Robert Richter <robert.richter@amd.com>   2009-01-08 14:27:34 +0100
commit     d2852b932f0bb5e89177aa27c7bcf07f4167e129 (patch)
tree       96d975ae155ba307acd07968939fff22afedddf7 /kernel
parent     4a6908a3a050aacc9c3a2f36b276b46c0629ad91 (diff)
parent     14f0ca8eaea42a5b5a69cfcb699665dd2618db5f (diff)
Merge branch 'oprofile/ring_buffer' into oprofile/oprofile-for-tip
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/trace/ring_buffer.c  44
-rw-r--r--  kernel/trace/trace.c         4
2 files changed, 44 insertions, 4 deletions
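
This merge brings the ring-buffer export work into oprofile-for-tip: the public ring buffer entry points in kernel/trace/ring_buffer.c gain EXPORT_SYMBOL_GPL() so that GPL modules (for example an oprofile buffer built on the trace ring buffer) can use them, ring_buffer_event_length() is changed to report an event's usable data length, and the ring_buffer_alloc() kerneldoc is corrected to say the size is per cpu. The sketch below shows how a GPL module could drive the exported API; it is illustrative only and not part of the commit, and the buffer size, payload and "rb_demo" naming are invented for the example.

/*
 * Illustrative sketch, not part of this merge: a GPL module using the
 * ring buffer API exported here.  The 64KB size, the sample payload and
 * the rb_demo naming are made up for the example.
 */
#include <linux/module.h>
#include <linux/cpumask.h>
#include <linux/ring_buffer.h>

static struct ring_buffer *demo_buffer;

static int __init rb_demo_init(void)
{
	struct ring_buffer_event *event;
	u64 sample = 42, ts;
	int cpu;

	/* size is per cpu; RB_FL_OVERWRITE overwrites old events when full */
	demo_buffer = ring_buffer_alloc(65536, RB_FL_OVERWRITE);
	if (!demo_buffer)
		return -ENOMEM;

	/* write one event without an explicit reserve/commit pair */
	if (ring_buffer_write(demo_buffer, sizeof(sample), &sample))
		printk(KERN_WARNING "rb_demo: ring buffer write failed\n");

	/* the write lands on the current cpu's buffer; scan them all */
	for_each_online_cpu(cpu) {
		event = ring_buffer_consume(demo_buffer, cpu, &ts);
		if (event)
			printk(KERN_INFO "rb_demo: %u data bytes from cpu %d\n",
			       ring_buffer_event_length(event), cpu);
	}

	return 0;
}

static void __exit rb_demo_exit(void)
{
	ring_buffer_free(demo_buffer);
}

module_init(rb_demo_init);
module_exit(rb_demo_exit);
MODULE_LICENSE("GPL");	/* required: the symbols are EXPORT_SYMBOL_GPL */
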
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 668bbb5ef2bd..d42b882dfe4b 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -31,6 +31,7 @@ void tracing_on(void)
{
ring_buffers_off = 0;
}
+EXPORT_SYMBOL_GPL(tracing_on);
/**
* tracing_off - turn off all tracing buffers
@@ -44,6 +45,7 @@ void tracing_off(void)
{
ring_buffers_off = 1;
}
+EXPORT_SYMBOL_GPL(tracing_off);
/* Up this if you want to test the TIME_EXTENTS and normalization */
#define DEBUG_SHIFT 0
@@ -60,12 +62,14 @@ u64 ring_buffer_time_stamp(int cpu)
return time;
}
+EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);
void ring_buffer_normalize_time_stamp(int cpu, u64 *ts)
{
/* Just stupid testing the normalize function and deltas */
*ts >>= DEBUG_SHIFT;
}
+EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
#define RB_EVNT_HDR_SIZE (sizeof(struct ring_buffer_event))
#define RB_ALIGNMENT_SHIFT 2
@@ -113,8 +117,15 @@ rb_event_length(struct ring_buffer_event *event)
*/
unsigned ring_buffer_event_length(struct ring_buffer_event *event)
{
- return rb_event_length(event);
+ unsigned length = rb_event_length(event);
+ if (event->type != RINGBUF_TYPE_DATA)
+ return length;
+ length -= RB_EVNT_HDR_SIZE;
+ if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
+ length -= sizeof(event->array[0]);
+ return length;
}
+EXPORT_SYMBOL_GPL(ring_buffer_event_length);
/* inline for ring buffer fast paths */
static inline void *
@@ -136,6 +147,7 @@ void *ring_buffer_event_data(struct ring_buffer_event *event)
{
return rb_event_data(event);
}
+EXPORT_SYMBOL_GPL(ring_buffer_event_data);
#define for_each_buffer_cpu(buffer, cpu) \
for_each_cpu_mask(cpu, buffer->cpumask)
@@ -381,7 +393,7 @@ extern int ring_buffer_page_too_big(void);
/**
* ring_buffer_alloc - allocate a new ring_buffer
- * @size: the size in bytes that is needed.
+ * @size: the size in bytes per cpu that is needed.
* @flags: attributes to set for the ring buffer.
*
* Currently the only flag that is available is the RB_FL_OVERWRITE
@@ -444,6 +456,7 @@ struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
kfree(buffer);
return NULL;
}
+EXPORT_SYMBOL_GPL(ring_buffer_alloc);
/**
* ring_buffer_free - free a ring buffer.
@@ -459,6 +472,7 @@ ring_buffer_free(struct ring_buffer *buffer)
kfree(buffer);
}
+EXPORT_SYMBOL_GPL(ring_buffer_free);
static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
@@ -620,6 +634,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
mutex_unlock(&buffer->mutex);
return -ENOMEM;
}
+EXPORT_SYMBOL_GPL(ring_buffer_resize);
static inline int rb_null_event(struct ring_buffer_event *event)
{
@@ -1220,6 +1235,7 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer,
preempt_enable_notrace();
return NULL;
}
+EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
struct ring_buffer_event *event)
@@ -1269,6 +1285,7 @@ int ring_buffer_unlock_commit(struct ring_buffer *buffer,
return 0;
}
+EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
/**
* ring_buffer_write - write data to the buffer without reserving
@@ -1334,6 +1351,7 @@ int ring_buffer_write(struct ring_buffer *buffer,
return ret;
}
+EXPORT_SYMBOL_GPL(ring_buffer_write);
static inline int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
{
@@ -1360,6 +1378,7 @@ void ring_buffer_record_disable(struct ring_buffer *buffer)
{
atomic_inc(&buffer->record_disabled);
}
+EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
/**
* ring_buffer_record_enable - enable writes to the buffer
@@ -1372,6 +1391,7 @@ void ring_buffer_record_enable(struct ring_buffer *buffer)
{
atomic_dec(&buffer->record_disabled);
}
+EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
/**
* ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
@@ -1393,6 +1413,7 @@ void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
cpu_buffer = buffer->buffers[cpu];
atomic_inc(&cpu_buffer->record_disabled);
}
+EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
/**
* ring_buffer_record_enable_cpu - enable writes to the buffer
@@ -1412,6 +1433,7 @@ void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
cpu_buffer = buffer->buffers[cpu];
atomic_dec(&cpu_buffer->record_disabled);
}
+EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
/**
* ring_buffer_entries_cpu - get the number of entries in a cpu buffer
@@ -1428,6 +1450,7 @@ unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
cpu_buffer = buffer->buffers[cpu];
return cpu_buffer->entries;
}
+EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
/**
* ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
@@ -1444,6 +1467,7 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
cpu_buffer = buffer->buffers[cpu];
return cpu_buffer->overrun;
}
+EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
/**
* ring_buffer_entries - get the number of entries in a buffer
@@ -1466,6 +1490,7 @@ unsigned long ring_buffer_entries(struct ring_buffer *buffer)
return entries;
}
+EXPORT_SYMBOL_GPL(ring_buffer_entries);
/**
* ring_buffer_overrun_cpu - get the number of overruns in buffer
@@ -1488,6 +1513,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
return overruns;
}
+EXPORT_SYMBOL_GPL(ring_buffer_overruns);
/**
* ring_buffer_iter_reset - reset an iterator
@@ -1513,6 +1539,7 @@ void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
else
iter->read_stamp = iter->head_page->time_stamp;
}
+EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
/**
* ring_buffer_iter_empty - check if an iterator has no more to read
@@ -1527,6 +1554,7 @@ int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
return iter->head_page == cpu_buffer->commit_page &&
iter->head == rb_commit_index(cpu_buffer);
}
+EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
static void
rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
@@ -1797,6 +1825,7 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
return NULL;
}
+EXPORT_SYMBOL_GPL(ring_buffer_peek);
/**
* ring_buffer_iter_peek - peek at the next event to be read
@@ -1867,6 +1896,7 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
return NULL;
}
+EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
/**
* ring_buffer_consume - return an event and consume it
@@ -1894,6 +1924,7 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
return event;
}
+EXPORT_SYMBOL_GPL(ring_buffer_consume);
/**
* ring_buffer_read_start - start a non consuming read of the buffer
@@ -1934,6 +1965,7 @@ ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
return iter;
}
+EXPORT_SYMBOL_GPL(ring_buffer_read_start);
/**
* ring_buffer_finish - finish reading the iterator of the buffer
@@ -1950,6 +1982,7 @@ ring_buffer_read_finish(struct ring_buffer_iter *iter)
atomic_dec(&cpu_buffer->record_disabled);
kfree(iter);
}
+EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
/**
* ring_buffer_read - read the next item in the ring buffer by the iterator
@@ -1971,6 +2004,7 @@ ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
return event;
}
+EXPORT_SYMBOL_GPL(ring_buffer_read);
/**
* ring_buffer_size - return the size of the ring buffer (in bytes)
@@ -1980,6 +2014,7 @@ unsigned long ring_buffer_size(struct ring_buffer *buffer)
{
return BUF_PAGE_SIZE * buffer->pages;
}
+EXPORT_SYMBOL_GPL(ring_buffer_size);
static void
rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
@@ -2022,6 +2057,7 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
spin_unlock_irqrestore(&cpu_buffer->lock, flags);
}
+EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
/**
* ring_buffer_reset - reset a ring buffer
@@ -2034,6 +2070,7 @@ void ring_buffer_reset(struct ring_buffer *buffer)
for_each_buffer_cpu(buffer, cpu)
ring_buffer_reset_cpu(buffer, cpu);
}
+EXPORT_SYMBOL_GPL(ring_buffer_reset);
/**
* rind_buffer_empty - is the ring buffer empty?
@@ -2052,6 +2089,7 @@ int ring_buffer_empty(struct ring_buffer *buffer)
}
return 1;
}
+EXPORT_SYMBOL_GPL(ring_buffer_empty);
/**
* ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
@@ -2068,6 +2106,7 @@ int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
cpu_buffer = buffer->buffers[cpu];
return rb_per_cpu_empty(cpu_buffer);
}
+EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
/**
* ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
@@ -2117,6 +2156,7 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
return 0;
}
+EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
static ssize_t
rb_simple_read(struct file *filp, char __user *ubuf,
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index d86e3252f300..a96b335fe75c 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -914,7 +914,7 @@ enum trace_file_type {
TRACE_FILE_LAT_FMT = 1,
};
-static void trace_iterator_increment(struct trace_iterator *iter, int cpu)
+static void trace_iterator_increment(struct trace_iterator *iter)
{
/* Don't allow ftrace to trace into the ring buffers */
ftrace_disable_cpu();
@@ -993,7 +993,7 @@ static void *find_next_entry_inc(struct trace_iterator *iter)
iter->ent = __find_next_entry(iter, &iter->cpu, &iter->ts);
if (iter->ent)
- trace_iterator_increment(iter, iter->cpu);
+ trace_iterator_increment(iter);
return iter->ent ? iter : NULL;
}
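
A note on the ring_buffer_event_length() hunk above: for data events the function now subtracts the event header (and, for larger events, the extra length word kept in array[0]), so the value it returns is the payload size that ring_buffer_event_data() points at rather than the raw on-buffer event size. A small consumer sketch, again illustrative rather than taken from the commit; the handle_sample() callback is hypothetical.

/*
 * Illustrative consumer loop, not from the commit: with the change to
 * ring_buffer_event_length() the returned value is the usable payload
 * size, so it pairs directly with ring_buffer_event_data().
 */
#include <linux/ring_buffer.h>

static void drain_cpu_buffer(struct ring_buffer *buffer, int cpu,
			     void (*handle_sample)(void *data, unsigned len))
{
	struct ring_buffer_event *event;
	u64 ts;

	/* consume events from one cpu's buffer until it is empty */
	while ((event = ring_buffer_consume(buffer, cpu, &ts)) != NULL)
		handle_sample(ring_buffer_event_data(event),
			      ring_buffer_event_length(event));
}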