linux/kernel/trace/trace_event_profile.c
Li Zefan e00bf2ec60 tracing: Change event->profile_count to be int type
Like total_profile_count, struct ftrace_event_call::profile_count
is protected by event_mutex, so it doesn't need to be atomic_t.

Signed-off-by: Li Zefan <lizf@cn.fujitsu.com>
Acked-by: Steven Rostedt <rostedt@goodmis.org>
Cc: Jason Baron <jbaron@redhat.com>
Cc: Masami Hiramatsu <mhiramat@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
LKML-Reference: <4B1DC549.5010705@cn.fujitsu.com>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
2009-12-13 18:37:28 +01:00
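
As a minimal sketch of the change described above (the struct is a cut-down stand-in and the helper is hypothetical; only the profile_count field and event_mutex come from the commit), a plain int with ordinary ++/-- is enough because every access already happens with event_mutex held:

/* Sketch only: simplified stand-in, not the real struct ftrace_event_call. */
#include <linux/mutex.h>

struct ftrace_event_call_sketch {
        int     id;
        int     profile_count;  /* was atomic_t before this patch */
};

static DEFINE_MUTEX(event_mutex);

/* All increments and decrements are serialized by event_mutex. */
static int sketch_profile_enable(struct ftrace_event_call_sketch *call)
{
        int first_user;

        mutex_lock(&event_mutex);
        first_user = (call->profile_count++ == 0);
        mutex_unlock(&event_mutex);

        return first_user;
}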


/*
 * trace event based perf counter profiling
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
 *
 */

#include <linux/module.h>
#include "trace.h"

char *perf_trace_buf;
EXPORT_SYMBOL_GPL(perf_trace_buf);

char *perf_trace_buf_nmi;
EXPORT_SYMBOL_GPL(perf_trace_buf_nmi);

/*
 * Dummy array type so that alloc_percpu() reserves
 * FTRACE_MAX_PROFILE_SIZE bytes per CPU for each buffer.
 */
typedef typeof(char [FTRACE_MAX_PROFILE_SIZE]) perf_trace_t;

/* Count the events in use (per event id, not per instance) */
static int total_profile_count;
static int ftrace_profile_enable_event(struct ftrace_event_call *event)
{
        char *buf;
        int ret = -ENOMEM;

        if (event->profile_count++ > 0)
                return 0;

        if (!total_profile_count) {
                buf = (char *)alloc_percpu(perf_trace_t);
                if (!buf)
                        goto fail_buf;

                rcu_assign_pointer(perf_trace_buf, buf);

                buf = (char *)alloc_percpu(perf_trace_t);
                if (!buf)
                        goto fail_buf_nmi;

                rcu_assign_pointer(perf_trace_buf_nmi, buf);
        }

        ret = event->profile_enable(event);
        if (!ret) {
                total_profile_count++;
                return 0;
        }

fail_buf_nmi:
        if (!total_profile_count) {
                free_percpu(perf_trace_buf_nmi);
                free_percpu(perf_trace_buf);
                perf_trace_buf_nmi = NULL;
                perf_trace_buf = NULL;
        }
fail_buf:
        event->profile_count--;

        return ret;
}

int ftrace_profile_enable(int event_id)
{
        struct ftrace_event_call *event;
        int ret = -EINVAL;

        mutex_lock(&event_mutex);
        list_for_each_entry(event, &ftrace_events, list) {
                if (event->id == event_id && event->profile_enable &&
                    try_module_get(event->mod)) {
                        ret = ftrace_profile_enable_event(event);
                        break;
                }
        }
        mutex_unlock(&event_mutex);

        return ret;
}
static void ftrace_profile_disable_event(struct ftrace_event_call *event)
{
        char *buf, *nmi_buf;

        if (--event->profile_count > 0)
                return;

        event->profile_disable(event);

        if (!--total_profile_count) {
                buf = perf_trace_buf;
                rcu_assign_pointer(perf_trace_buf, NULL);

                nmi_buf = perf_trace_buf_nmi;
                rcu_assign_pointer(perf_trace_buf_nmi, NULL);

                /*
                 * Ensure every event in profiling has finished before
                 * releasing the buffers.
                 */
                synchronize_sched();

                free_percpu(buf);
                free_percpu(nmi_buf);
        }
}
void ftrace_profile_disable(int event_id)
{
        struct ftrace_event_call *event;

        mutex_lock(&event_mutex);
        list_for_each_entry(event, &ftrace_events, list) {
                if (event->id == event_id) {
                        ftrace_profile_disable_event(event);
                        module_put(event->mod);
                        break;
                }
        }
        mutex_unlock(&event_mutex);
}
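
A rough usage sketch follows (the caller and its flow are hypothetical, assuming only the two non-static entry points above): a consumer enables a trace event by id for profiling and disables it again when done, so the shared per-cpu buffers are allocated on first use and freed with the last user.

/* Hypothetical caller: a sketch, not the real perf glue code. */
static int example_profile_one_event(int event_id)
{
        int ret;

        ret = ftrace_profile_enable(event_id);  /* allocates buffers on first use */
        if (ret)
                return ret;                     /* e.g. -EINVAL (unknown id) or -ENOMEM */

        /* ... record events into perf_trace_buf / perf_trace_buf_nmi ... */

        ftrace_profile_disable(event_id);       /* buffers freed when the last user is gone */
        return 0;
}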