a6f0eb6adc
The ring_buffer_benchmark does a gettimeofday after every write to the
ring buffer in its measurements. This adds the overhead of the call to
gettimeofday to the measurements and does not give an accurate picture
of the length of time it takes to record a trace.

This was first noticed with perf top:

------------------------------------------------------------------------------
   PerfTop:     679 irqs/sec  kernel:99.9% [1000Hz cpu-clock-msecs],  (all, 4 CPUs)
------------------------------------------------------------------------------

             samples    pcnt   kernel function
             _______   _____   _______________

             1673.00 - 27.8% : trace_clock_local
              806.00 - 13.4% : do_gettimeofday
              590.00 -  9.8% : rb_reserve_next_event
              554.00 -  9.2% : native_read_tsc
              431.00 -  7.2% : ring_buffer_lock_reserve
              365.00 -  6.1% : __rb_reserve_next
              355.00 -  5.9% : rb_end_commit
              322.00 -  5.4% : getnstimeofday
              268.00 -  4.5% : ring_buffer_unlock_commit
              262.00 -  4.4% : ring_buffer_producer_thread [ring_buffer_benchmark]
              113.00 -  1.9% : read_tsc
               91.00 -  1.5% : debug_smp_processor_id
               69.00 -  1.1% : trace_recursive_unlock
               66.00 -  1.1% : ring_buffer_event_data
               25.00 -  0.4% : _spin_unlock_irq

And the length of each write to the ring buffer measured at 310ns.

This patch adds a new module parameter called "write_iteration" which is
defaulted to 50. This is the number of writes performed between
timestamps. After this patch perf top shows:

------------------------------------------------------------------------------
   PerfTop:     244 irqs/sec  kernel:100.0% [1000Hz cpu-clock-msecs],  (all, 4 CPUs)
------------------------------------------------------------------------------

             samples    pcnt   kernel function
             _______   _____   _______________

             2842.00 - 40.4% : trace_clock_local
             1043.00 - 14.8% : rb_reserve_next_event
              784.00 - 11.1% : ring_buffer_lock_reserve
              600.00 -  8.5% : __rb_reserve_next
              579.00 -  8.2% : rb_end_commit
              440.00 -  6.3% : ring_buffer_unlock_commit
              290.00 -  4.1% : ring_buffer_producer_thread [ring_buffer_benchmark]
              155.00 -  2.2% : debug_smp_processor_id
              117.00 -  1.7% : trace_recursive_unlock
              103.00 -  1.5% : ring_buffer_event_data
               28.00 -  0.4% : do_gettimeofday
               22.00 -  0.3% : _spin_unlock_irq
               14.00 -  0.2% : native_read_tsc
               11.00 -  0.2% : getnstimeofday

do_gettimeofday dropped from 13% usage to a mere 0.4%! (using the
default 50 interval)

The measurement for each timestamp went from 310ns to 210ns. That's
100ns (1/3rd) overhead that the gettimeofday call was introducing.

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
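To make the arithmetic behind the change concrete, here is a small user-space
sketch (not part of the patch; the figures are simply the 310ns/210ns numbers
quoted above, and the helper program is purely illustrative) showing how
batching writes between timestamp readings amortizes the roughly 100ns cost of
a gettimeofday call:

#include <stdio.h>

int main(void)
{
	/* Figures quoted in the changelog above (approximate). */
	const double per_write_with_gtod_ns = 310.0; /* timestamp after every write      */
	const double per_write_batched_ns   = 210.0; /* timestamp after every 50 writes  */
	const double gtod_cost_ns = per_write_with_gtod_ns - per_write_batched_ns; /* ~100ns */
	const int batches[] = { 1, 10, 50 };
	unsigned int i;

	for (i = 0; i < sizeof(batches) / sizeof(batches[0]); i++)
		printf("write_iteration=%2d -> ~%.0f ns of timestamp overhead per write\n",
		       batches[i], gtod_cost_ns / batches[i]);

	return 0;
}

With the default of 50, the residual timestamp cost per write is on the order
of a couple of nanoseconds, which matches the near-disappearance of
do_gettimeofday in the second perf top profile.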
427 lines · 8.7 KiB · C
/*
 * ring buffer tester and benchmark
 *
 * Copyright (C) 2009 Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/ring_buffer.h>
#include <linux/completion.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/time.h>

struct rb_page {
	u64		ts;
	local_t		commit;
	char		data[4080];
};

/* run time and sleep time in seconds */
#define RUN_TIME	10
#define SLEEP_TIME	10

/* number of events for writer to wake up the reader */
static int wakeup_interval = 100;

static int reader_finish;
static struct completion read_start;
static struct completion read_done;

static struct ring_buffer *buffer;
static struct task_struct *producer;
static struct task_struct *consumer;
static unsigned long read;

static int disable_reader;
module_param(disable_reader, uint, 0644);
MODULE_PARM_DESC(disable_reader, "only run producer");

static int write_iteration = 50;
module_param(write_iteration, uint, 0644);
MODULE_PARM_DESC(write_iteration, "# of writes between timestamp readings");

static int read_events;

static int kill_test;

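/*
 * Flag a fatal test failure: set kill_test once and emit a warning so
 * the producer/consumer loops below can bail out cleanly.
 */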
#define KILL_TEST()				\
	do {					\
		if (!kill_test) {		\
			kill_test = 1;		\
			WARN_ON(1);		\
		}				\
	} while (0)

enum event_status {
	EVENT_FOUND,
	EVENT_DROPPED,
};

static enum event_status read_event(int cpu)
{
	struct ring_buffer_event *event;
	int *entry;
	u64 ts;

	event = ring_buffer_consume(buffer, cpu, &ts);
	if (!event)
		return EVENT_DROPPED;

	entry = ring_buffer_event_data(event);
	if (*entry != cpu) {
		KILL_TEST();
		return EVENT_DROPPED;
	}

	read++;
	return EVENT_FOUND;
}

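/*
 * Read a full page with ring_buffer_read_page() and walk the raw events
 * by hand, validating each entry against the cpu it was written on.
 */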
static enum event_status read_page(int cpu)
{
	struct ring_buffer_event *event;
	struct rb_page *rpage;
	unsigned long commit;
	void *bpage;
	int *entry;
	int ret;
	int inc;
	int i;

	bpage = ring_buffer_alloc_read_page(buffer);
	if (!bpage)
		return EVENT_DROPPED;

	ret = ring_buffer_read_page(buffer, &bpage, PAGE_SIZE, cpu, 1);
	if (ret >= 0) {
		rpage = bpage;
		commit = local_read(&rpage->commit);
		for (i = 0; i < commit && !kill_test; i += inc) {

			if (i >= (PAGE_SIZE - offsetof(struct rb_page, data))) {
				KILL_TEST();
				break;
			}

			inc = -1;
			event = (void *)&rpage->data[i];
			switch (event->type_len) {
			case RINGBUF_TYPE_PADDING:
				/* failed writes may be discarded events */
				if (!event->time_delta)
					KILL_TEST();
				inc = event->array[0] + 4;
				break;
			case RINGBUF_TYPE_TIME_EXTEND:
				inc = 8;
				break;
			case 0:
				entry = ring_buffer_event_data(event);
				if (*entry != cpu) {
					KILL_TEST();
					break;
				}
				read++;
				if (!event->array[0]) {
					KILL_TEST();
					break;
				}
				inc = event->array[0] + 4;
				break;
			default:
				entry = ring_buffer_event_data(event);
				if (*entry != cpu) {
					KILL_TEST();
					break;
				}
				read++;
				inc = ((event->type_len + 1) * 4);
			}
			if (kill_test)
				break;

			if (inc <= 0) {
				KILL_TEST();
				break;
			}
		}
	}
	ring_buffer_free_read_page(buffer, bpage);

	if (ret < 0)
		return EVENT_DROPPED;
	return EVENT_FOUND;
}

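/*
 * Drain the buffer on every online CPU, alternating between the event
 * and page read paths on successive runs, until the producer signals
 * reader_finish.
 */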
static void ring_buffer_consumer(void)
{
	/* toggle between reading pages and events */
	read_events ^= 1;

	read = 0;
	while (!reader_finish && !kill_test) {
		int found;

		do {
			int cpu;

			found = 0;
			for_each_online_cpu(cpu) {
				enum event_status stat;

				if (read_events)
					stat = read_event(cpu);
				else
					stat = read_page(cpu);

				if (kill_test)
					break;
				if (stat == EVENT_FOUND)
					found = 1;
			}
		} while (found && !kill_test);

		set_current_state(TASK_INTERRUPTIBLE);
		if (reader_finish)
			break;

		schedule();
		__set_current_state(TASK_RUNNING);
	}
	reader_finish = 0;
	complete(&read_done);
}

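/*
 * Hammer the ring buffer for RUN_TIME seconds, waking the consumer every
 * wakeup_interval iterations, then report entries, overruns and the
 * average cost per write.
 */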
static void ring_buffer_producer(void)
{
	struct timeval start_tv;
	struct timeval end_tv;
	unsigned long long time;
	unsigned long long entries;
	unsigned long long overruns;
	unsigned long missed = 0;
	unsigned long hit = 0;
	unsigned long avg;
	int cnt = 0;

	/*
	 * Hammer the buffer for 10 secs (this may
	 * make the system stall)
	 */
	trace_printk("Starting ring buffer hammer\n");
	do_gettimeofday(&start_tv);
	do {
		struct ring_buffer_event *event;
		int *entry;
		int i;

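		/*
		 * Batch write_iteration writes between timestamp readings
		 * so the cost of do_gettimeofday() is amortized rather than
		 * added to every single write (see the changelog above).
		 */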
		for (i = 0; i < write_iteration; i++) {
			event = ring_buffer_lock_reserve(buffer, 10);
			if (!event) {
				missed++;
			} else {
				hit++;
				entry = ring_buffer_event_data(event);
				*entry = smp_processor_id();
				ring_buffer_unlock_commit(buffer, event);
			}
		}
		do_gettimeofday(&end_tv);

		cnt++;
		if (consumer && !(cnt % wakeup_interval))
			wake_up_process(consumer);

#ifndef CONFIG_PREEMPT
		/*
		 * If we are a non preempt kernel, the 10 second run will
		 * stop everything while it runs. Instead, we will call
		 * cond_resched and also add any time that was lost by a
		 * reschedule.
		 *
		 * Do a cond resched at the same frequency we would wake up
		 * the reader.
		 */
		if (cnt % wakeup_interval)
			cond_resched();
#endif

	} while (end_tv.tv_sec < (start_tv.tv_sec + RUN_TIME) && !kill_test);
trace_printk("End ring buffer hammer\n");
|
|
|
|
if (consumer) {
|
|
/* Init both completions here to avoid races */
|
|
init_completion(&read_start);
|
|
init_completion(&read_done);
|
|
/* the completions must be visible before the finish var */
|
|
smp_wmb();
|
|
reader_finish = 1;
|
|
/* finish var visible before waking up the consumer */
|
|
smp_wmb();
|
|
wake_up_process(consumer);
|
|
wait_for_completion(&read_done);
|
|
}
|
|
|
|
time = end_tv.tv_sec - start_tv.tv_sec;
|
|
time *= USEC_PER_SEC;
|
|
time += (long long)((long)end_tv.tv_usec - (long)start_tv.tv_usec);
|
|
|
|
entries = ring_buffer_entries(buffer);
|
|
overruns = ring_buffer_overruns(buffer);
|
|
|
|
if (kill_test)
|
|
trace_printk("ERROR!\n");
|
|
trace_printk("Time: %lld (usecs)\n", time);
|
|
trace_printk("Overruns: %lld\n", overruns);
|
|
if (disable_reader)
|
|
trace_printk("Read: (reader disabled)\n");
|
|
else
|
|
trace_printk("Read: %ld (by %s)\n", read,
|
|
read_events ? "events" : "pages");
|
|
trace_printk("Entries: %lld\n", entries);
|
|
trace_printk("Total: %lld\n", entries + overruns + read);
|
|
trace_printk("Missed: %ld\n", missed);
|
|
trace_printk("Hit: %ld\n", hit);
|
|
|
|
/* Convert time from usecs to millisecs */
|
|
do_div(time, USEC_PER_MSEC);
|
|
if (time)
|
|
hit /= (long)time;
|
|
else
|
|
trace_printk("TIME IS ZERO??\n");
|
|
|
|
trace_printk("Entries per millisec: %ld\n", hit);
|
|
|
|
if (hit) {
|
|
/* Calculate the average time in nanosecs */
|
|
avg = NSEC_PER_MSEC / hit;
|
|
trace_printk("%ld ns per entry\n", avg);
|
|
}
|
|
|
|
if (missed) {
|
|
if (time)
|
|
missed /= (long)time;
|
|
|
|
trace_printk("Total iterations per millisec: %ld\n",
|
|
hit + missed);
|
|
|
|
/* it is possible that hit + missed will overflow and be zero */
|
|
if (!(hit + missed)) {
|
|
trace_printk("hit + missed overflowed and totalled zero!\n");
|
|
hit--; /* make it non zero */
|
|
}
|
|
|
|
/* Caculate the average time in nanosecs */
|
|
avg = NSEC_PER_MSEC / (hit + missed);
|
|
trace_printk("%ld ns per entry\n", avg);
|
|
}
|
|
}

static void wait_to_die(void)
{
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
}

static int ring_buffer_consumer_thread(void *arg)
{
	while (!kthread_should_stop() && !kill_test) {
		complete(&read_start);

		ring_buffer_consumer();

		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread_should_stop() || kill_test)
			break;

		schedule();
		__set_current_state(TASK_RUNNING);
	}
	__set_current_state(TASK_RUNNING);

	if (kill_test)
		wait_to_die();

	return 0;
}

static int ring_buffer_producer_thread(void *arg)
{
	init_completion(&read_start);

	while (!kthread_should_stop() && !kill_test) {
		ring_buffer_reset(buffer);

		if (consumer) {
			smp_wmb();
			wake_up_process(consumer);
			wait_for_completion(&read_start);
		}

		ring_buffer_producer();

		trace_printk("Sleeping for 10 secs\n");
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ * SLEEP_TIME);
		__set_current_state(TASK_RUNNING);
	}

	if (kill_test)
		wait_to_die();

	return 0;
}

static int __init ring_buffer_benchmark_init(void)
{
	int ret;

	/* make a one meg buffer in overwrite mode */
	buffer = ring_buffer_alloc(1000000, RB_FL_OVERWRITE);
	if (!buffer)
		return -ENOMEM;

	if (!disable_reader) {
		consumer = kthread_create(ring_buffer_consumer_thread,
					  NULL, "rb_consumer");
		ret = PTR_ERR(consumer);
		if (IS_ERR(consumer))
			goto out_fail;
	}

	producer = kthread_run(ring_buffer_producer_thread,
			       NULL, "rb_producer");
	ret = PTR_ERR(producer);

	if (IS_ERR(producer))
		goto out_kill;

	return 0;

 out_kill:
	if (consumer)
		kthread_stop(consumer);

 out_fail:
	ring_buffer_free(buffer);
	return ret;
}

static void __exit ring_buffer_benchmark_exit(void)
{
	kthread_stop(producer);
	if (consumer)
		kthread_stop(consumer);
	ring_buffer_free(buffer);
}

module_init(ring_buffer_benchmark_init);
module_exit(ring_buffer_benchmark_exit);

MODULE_AUTHOR("Steven Rostedt");
MODULE_DESCRIPTION("ring_buffer_benchmark");
MODULE_LICENSE("GPL");