5092dbc96f
This patch adds code that can benchmark the ring buffer as well as test it. The code can be compiled into the kernel (not recommended) or as a module.

A separate ring buffer is used so as not to interfere with other users, like ftrace. The module creates a producer and a consumer (with an option to disable creation of the consumer) and will run for 10 seconds, then sleep for 10 seconds, and then repeat.

While running, the producer writes 10-byte loads into the ring buffer, each containing just the current CPU number. The reader continually tries to read the buffer, alternating between reading event by event and reading by full pages.

The output goes through pr_info, so it will fill up the syslogs:

Starting ring buffer hammer
End ring buffer hammer
Time:     9000349 (usecs)
Overruns: 12578640
Read:     5358440 (by events)
Entries:  0
Total:    17937080
Missed:   0
Hit:      17937080
Entries per millisec: 1993
501 ns per entry
Sleeping for 10 secs

Starting ring buffer hammer
End ring buffer hammer
Time:     9936350 (usecs)
Overruns: 0
Read:     28146644 (by pages)
Entries:  74
Total:    28146718
Missed:   0
Hit:      28146718
Entries per millisec: 2832
353 ns per entry
Sleeping for 10 secs

Time:     the time the test ran
Overruns: the number of events that were overwritten and not read
Read:     the number of events read (either by pages or by events)
Entries:  the number of entries left in the buffer
          (the by-pages reader will only read full pages)
Total:    Entries + Read + Overruns
Missed:   the number of entries that failed to write
Hit:      the number of entries that were written

The example above shows that it takes ~353 nanosecs per entry when there is a reader reading by pages (and no overruns). The event-by-event reader slowed the producer down to 501 nanosecs per entry.

[ Impact: see how changes to the ring buffer affect stability and performance ]

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
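For orientation, the pattern the benchmark exercises looks roughly like the sketch below: a producer reserves a small event, stores the current CPU number, and commits it, while the consumer drains events one at a time (the by-pages mode uses ring_buffer_read_page() instead of per-event consumption). This is a simplified illustration, not the actual ring_buffer_benchmark.c; the helper names are made up for illustration, and the exact prototypes of ring_buffer_lock_reserve(), ring_buffer_unlock_commit() and ring_buffer_consume() have changed across kernel versions, so treat the signatures as assumptions for this kernel era.

/*
 * Simplified sketch of the benchmark's write/read pattern.  Not the
 * actual ring_buffer_benchmark.c; helper names are invented and API
 * signatures are assumed to match the ring-buffer interface of this
 * kernel era (they differ in later kernels).
 */
#include <linux/ring_buffer.h>
#include <linux/module.h>
#include <linux/smp.h>

static struct ring_buffer *buffer;

/* Producer side: write a 10-byte event holding the current CPU number. */
static void rb_bench_write_one(void)
{
	struct ring_buffer_event *event;
	int *payload;

	event = ring_buffer_lock_reserve(buffer, 10);
	if (!event)
		return;			/* a failed write: counted as "Missed" */

	payload = ring_buffer_event_data(event);
	*payload = smp_processor_id();

	ring_buffer_unlock_commit(buffer, event);	/* counted as "Hit" */
}

/* Reader side, event-by-event mode: consume one event from @cpu. */
static int rb_bench_read_one(int cpu)
{
	struct ring_buffer_event *event;
	u64 ts;

	event = ring_buffer_consume(buffer, cpu, &ts);
	if (!event)
		return 0;

	/* ring_buffer_event_data(event) holds the CPU number written above */
	return 1;
}

static int __init rb_bench_sketch_init(void)
{
	/* Overwrite mode is what produces the "Overruns" count in the log */
	buffer = ring_buffer_alloc(1 << 20, RB_FL_OVERWRITE);
	return buffer ? 0 : -ENOMEM;
}

static void __exit rb_bench_sketch_exit(void)
{
	ring_buffer_free(buffer);
}

module_init(rb_bench_sketch_init);
module_exit(rb_bench_sketch_exit);
MODULE_LICENSE("GPL");

In the real module the producer and the reader run as kernel threads for the 10-second windows described above, and the Hit/Missed/Overruns counters in the log come from tallying the results of calls like these.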
Makefile
# Do not instrument the tracer itself:

ifdef CONFIG_FUNCTION_TRACER
ORIG_CFLAGS := $(KBUILD_CFLAGS)
KBUILD_CFLAGS = $(subst -pg,,$(ORIG_CFLAGS))

# selftest needs instrumentation
CFLAGS_trace_selftest_dynamic.o = -pg
obj-y += trace_selftest_dynamic.o
endif

# If unlikely tracing is enabled, do not trace these files
ifdef CONFIG_TRACING_BRANCHES
KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
endif

obj-$(CONFIG_FUNCTION_TRACER) += libftrace.o
obj-$(CONFIG_RING_BUFFER) += ring_buffer.o
obj-$(CONFIG_RING_BUFFER_BENCHMARK) += ring_buffer_benchmark.o

obj-$(CONFIG_TRACING) += trace.o
obj-$(CONFIG_TRACING) += trace_clock.o
obj-$(CONFIG_TRACING) += trace_output.o
obj-$(CONFIG_TRACING) += trace_stat.o
obj-$(CONFIG_TRACING) += trace_printk.o
obj-$(CONFIG_CONTEXT_SWITCH_TRACER) += trace_sched_switch.o
obj-$(CONFIG_SYSPROF_TRACER) += trace_sysprof.o
obj-$(CONFIG_FUNCTION_TRACER) += trace_functions.o
obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o
obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o
obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o
obj-$(CONFIG_NOP_TRACER) += trace_nop.o
obj-$(CONFIG_STACK_TRACER) += trace_stack.o
obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o
obj-$(CONFIG_BOOT_TRACER) += trace_boot.o
obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += trace_functions_graph.o
obj-$(CONFIG_TRACE_BRANCH_PROFILING) += trace_branch.o
obj-$(CONFIG_HW_BRANCH_TRACER) += trace_hw_branches.o
obj-$(CONFIG_POWER_TRACER) += trace_power.o
obj-$(CONFIG_KMEMTRACE) += kmemtrace.o
obj-$(CONFIG_WORKQUEUE_TRACER) += trace_workqueue.o
obj-$(CONFIG_BLK_DEV_IO_TRACE) += blktrace.o
obj-$(CONFIG_EVENT_TRACING) += trace_events.o
obj-$(CONFIG_EVENT_TRACING) += trace_export.o
obj-$(CONFIG_FTRACE_SYSCALLS) += trace_syscalls.o
obj-$(CONFIG_EVENT_PROFILE) += trace_event_profile.o
obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o

libftrace-y := ftrace.o
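With the obj-$(CONFIG_RING_BUFFER_BENCHMARK) hook above, setting that option to m builds the benchmark as a standalone module (under normal kbuild naming this would be ring_buffer_benchmark.ko); loading it starts the 10-seconds-on, 10-seconds-off cycle described in the changelog, with all statistics reported to the kernel log via pr_info. Building it into the kernel (=y) works as well but is not recommended, since the pr_info output will steadily fill the syslogs.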