2009-11-20 00:53:25 -07:00
|
|
|
#include "builtin.h"
|
|
|
|
#include "perf.h"
|
|
|
|
|
|
|
|
#include "util/util.h"
|
|
|
|
#include "util/cache.h"
|
|
|
|
#include "util/symbol.h"
|
|
|
|
#include "util/thread.h"
|
|
|
|
#include "util/header.h"
|
2009-12-11 16:24:02 -07:00
|
|
|
#include "util/session.h"
|
2009-11-20 00:53:25 -07:00
|
|
|
|
|
|
|
#include "util/parse-options.h"
|
|
|
|
#include "util/trace-event.h"
|
|
|
|
|
|
|
|
#include "util/debug.h"
|
|
|
|
|
|
|
|
#include <linux/rbtree.h>
|
|
|
|
|
|
|
|
struct alloc_stat;
|
|
|
|
typedef int (*sort_fn_t)(struct alloc_stat *, struct alloc_stat *);
|
|
|
|
|
|
|
|
static char const *input_name = "perf.data";
|
|
|
|
|
|
|
|
static int alloc_flag;
|
|
|
|
static int caller_flag;
|
|
|
|
|
|
|
|
static int alloc_lines = -1;
|
|
|
|
static int caller_lines = -1;
|
|
|
|
|
2009-11-23 22:25:48 -07:00
|
|
|
static bool raw_ip;
|
|
|
|
|
2009-11-23 22:26:10 -07:00
|
|
|
static char default_sort_order[] = "frag,hit,bytes";
|
|
|
|
|
2009-11-23 22:26:31 -07:00
|
|
|
static int *cpunode_map;
|
|
|
|
static int max_cpu_num;
|
|
|
|
|
2009-11-20 00:53:25 -07:00
|
|
|
struct alloc_stat {
|
2009-11-23 22:26:55 -07:00
|
|
|
u64 call_site;
|
|
|
|
u64 ptr;
|
2009-11-20 00:53:25 -07:00
|
|
|
u64 bytes_req;
|
|
|
|
u64 bytes_alloc;
|
|
|
|
u32 hit;
|
2009-11-23 22:26:55 -07:00
|
|
|
u32 pingpong;
|
|
|
|
|
|
|
|
short alloc_cpu;
|
2009-11-20 00:53:25 -07:00
|
|
|
|
|
|
|
struct rb_node node;
|
|
|
|
};
|
|
|
|
|
|
|
|
static struct rb_root root_alloc_stat;
|
|
|
|
static struct rb_root root_alloc_sorted;
|
|
|
|
static struct rb_root root_caller_stat;
|
|
|
|
static struct rb_root root_caller_sorted;
|
|
|
|
|
|
|
|
static unsigned long total_requested, total_allocated;
|
2009-11-23 22:26:31 -07:00
|
|
|
static unsigned long nr_allocs, nr_cross_allocs;
|
2009-11-20 00:53:25 -07:00
|
|
|
|
2009-11-23 22:26:31 -07:00
|
|
|
#define PATH_SYS_NODE "/sys/devices/system/node"
|
|
|
|
|
|
|
|
static void init_cpunode_map(void)
|
|
|
|
{
|
|
|
|
FILE *fp;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
fp = fopen("/sys/devices/system/cpu/kernel_max", "r");
|
|
|
|
if (!fp) {
|
|
|
|
max_cpu_num = 4096;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (fscanf(fp, "%d", &max_cpu_num) < 1)
|
|
|
|
die("Failed to read 'kernel_max' from sysfs");
|
|
|
|
max_cpu_num++;
|
|
|
|
|
|
|
|
cpunode_map = calloc(max_cpu_num, sizeof(int));
|
|
|
|
if (!cpunode_map)
|
|
|
|
die("calloc");
|
|
|
|
for (i = 0; i < max_cpu_num; i++)
|
|
|
|
cpunode_map[i] = -1;
|
|
|
|
fclose(fp);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void setup_cpunode_map(void)
|
|
|
|
{
|
|
|
|
struct dirent *dent1, *dent2;
|
|
|
|
DIR *dir1, *dir2;
|
|
|
|
unsigned int cpu, mem;
|
|
|
|
char buf[PATH_MAX];
|
|
|
|
|
|
|
|
init_cpunode_map();
|
|
|
|
|
|
|
|
dir1 = opendir(PATH_SYS_NODE);
|
|
|
|
if (!dir1)
|
|
|
|
return;
|
|
|
|
|
2009-12-19 14:40:28 -07:00
|
|
|
while ((dent1 = readdir(dir1)) != NULL) {
|
|
|
|
if (dent1->d_type != DT_DIR ||
|
|
|
|
sscanf(dent1->d_name, "node%u", &mem) < 1)
|
2009-11-23 22:26:31 -07:00
|
|
|
continue;
|
|
|
|
|
|
|
|
snprintf(buf, PATH_MAX, "%s/%s", PATH_SYS_NODE, dent1->d_name);
|
|
|
|
dir2 = opendir(buf);
|
|
|
|
if (!dir2)
|
|
|
|
continue;
|
2009-12-19 14:40:28 -07:00
|
|
|
while ((dent2 = readdir(dir2)) != NULL) {
|
|
|
|
if (dent2->d_type != DT_LNK ||
|
|
|
|
sscanf(dent2->d_name, "cpu%u", &cpu) < 1)
|
2009-11-23 22:26:31 -07:00
|
|
|
continue;
|
|
|
|
cpunode_map[cpu] = mem;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2009-11-23 22:26:55 -07:00
|
|
|
static void insert_alloc_stat(unsigned long call_site, unsigned long ptr,
|
|
|
|
int bytes_req, int bytes_alloc, int cpu)
|
2009-11-20 00:53:25 -07:00
|
|
|
{
|
|
|
|
struct rb_node **node = &root_alloc_stat.rb_node;
|
|
|
|
struct rb_node *parent = NULL;
|
|
|
|
struct alloc_stat *data = NULL;
|
|
|
|
|
|
|
|
while (*node) {
|
|
|
|
parent = *node;
|
|
|
|
data = rb_entry(*node, struct alloc_stat, node);
|
|
|
|
|
|
|
|
if (ptr > data->ptr)
|
|
|
|
node = &(*node)->rb_right;
|
|
|
|
else if (ptr < data->ptr)
|
|
|
|
node = &(*node)->rb_left;
|
|
|
|
else
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (data && data->ptr == ptr) {
|
|
|
|
data->hit++;
|
|
|
|
data->bytes_req += bytes_req;
|
2009-12-21 02:52:55 -07:00
|
|
|
data->bytes_alloc += bytes_alloc;
|
2009-11-20 00:53:25 -07:00
|
|
|
} else {
|
|
|
|
data = malloc(sizeof(*data));
|
2009-11-23 22:26:55 -07:00
|
|
|
if (!data)
|
|
|
|
die("malloc");
|
2009-11-20 00:53:25 -07:00
|
|
|
data->ptr = ptr;
|
2009-11-23 22:26:55 -07:00
|
|
|
data->pingpong = 0;
|
2009-11-20 00:53:25 -07:00
|
|
|
data->hit = 1;
|
|
|
|
data->bytes_req = bytes_req;
|
|
|
|
data->bytes_alloc = bytes_alloc;
|
|
|
|
|
|
|
|
rb_link_node(&data->node, parent, node);
|
|
|
|
rb_insert_color(&data->node, &root_alloc_stat);
|
|
|
|
}
|
2009-11-23 22:26:55 -07:00
|
|
|
data->call_site = call_site;
|
|
|
|
data->alloc_cpu = cpu;
|
2009-11-20 00:53:25 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
static void insert_caller_stat(unsigned long call_site,
|
|
|
|
int bytes_req, int bytes_alloc)
|
|
|
|
{
|
|
|
|
struct rb_node **node = &root_caller_stat.rb_node;
|
|
|
|
struct rb_node *parent = NULL;
|
|
|
|
struct alloc_stat *data = NULL;
|
|
|
|
|
|
|
|
while (*node) {
|
|
|
|
parent = *node;
|
|
|
|
data = rb_entry(*node, struct alloc_stat, node);
|
|
|
|
|
|
|
|
if (call_site > data->call_site)
|
|
|
|
node = &(*node)->rb_right;
|
|
|
|
else if (call_site < data->call_site)
|
|
|
|
node = &(*node)->rb_left;
|
|
|
|
else
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (data && data->call_site == call_site) {
|
|
|
|
data->hit++;
|
|
|
|
data->bytes_req += bytes_req;
|
2009-12-21 02:52:55 -07:00
|
|
|
data->bytes_alloc += bytes_alloc;
|
2009-11-20 00:53:25 -07:00
|
|
|
} else {
|
|
|
|
data = malloc(sizeof(*data));
|
2009-11-23 22:26:55 -07:00
|
|
|
if (!data)
|
|
|
|
die("malloc");
|
2009-11-20 00:53:25 -07:00
|
|
|
data->call_site = call_site;
|
2009-11-23 22:26:55 -07:00
|
|
|
data->pingpong = 0;
|
2009-11-20 00:53:25 -07:00
|
|
|
data->hit = 1;
|
|
|
|
data->bytes_req = bytes_req;
|
|
|
|
data->bytes_alloc = bytes_alloc;
|
|
|
|
|
|
|
|
rb_link_node(&data->node, parent, node);
|
|
|
|
rb_insert_color(&data->node, &root_caller_stat);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2009-12-06 22:04:04 -07:00
|
|
|
static void process_alloc_event(void *data,
|
2009-11-20 00:53:25 -07:00
|
|
|
struct event *event,
|
2009-11-23 22:26:31 -07:00
|
|
|
int cpu,
|
2009-11-20 00:53:25 -07:00
|
|
|
u64 timestamp __used,
|
|
|
|
struct thread *thread __used,
|
2009-11-23 22:26:31 -07:00
|
|
|
int node)
|
2009-11-20 00:53:25 -07:00
|
|
|
{
|
|
|
|
unsigned long call_site;
|
|
|
|
unsigned long ptr;
|
|
|
|
int bytes_req;
|
|
|
|
int bytes_alloc;
|
2009-11-23 22:26:31 -07:00
|
|
|
int node1, node2;
|
2009-11-20 00:53:25 -07:00
|
|
|
|
2009-12-06 22:04:04 -07:00
|
|
|
ptr = raw_field_value(event, "ptr", data);
|
|
|
|
call_site = raw_field_value(event, "call_site", data);
|
|
|
|
bytes_req = raw_field_value(event, "bytes_req", data);
|
|
|
|
bytes_alloc = raw_field_value(event, "bytes_alloc", data);
|
2009-11-20 00:53:25 -07:00
|
|
|
|
2009-11-23 22:26:55 -07:00
|
|
|
insert_alloc_stat(call_site, ptr, bytes_req, bytes_alloc, cpu);
|
2009-11-20 00:53:25 -07:00
|
|
|
insert_caller_stat(call_site, bytes_req, bytes_alloc);
|
|
|
|
|
|
|
|
total_requested += bytes_req;
|
|
|
|
total_allocated += bytes_alloc;
|
2009-11-23 22:26:31 -07:00
|
|
|
|
|
|
|
if (node) {
|
|
|
|
node1 = cpunode_map[cpu];
|
2009-12-06 22:04:04 -07:00
|
|
|
node2 = raw_field_value(event, "node", data);
|
2009-11-23 22:26:31 -07:00
|
|
|
if (node1 != node2)
|
|
|
|
nr_cross_allocs++;
|
|
|
|
}
|
|
|
|
nr_allocs++;
|
2009-11-20 00:53:25 -07:00
|
|
|
}
|
|
|
|
|
2009-11-23 22:26:55 -07:00
|
|
|
static int ptr_cmp(struct alloc_stat *, struct alloc_stat *);
|
|
|
|
static int callsite_cmp(struct alloc_stat *, struct alloc_stat *);
|
|
|
|
|
|
|
|
static struct alloc_stat *search_alloc_stat(unsigned long ptr,
|
|
|
|
unsigned long call_site,
|
|
|
|
struct rb_root *root,
|
|
|
|
sort_fn_t sort_fn)
|
|
|
|
{
|
|
|
|
struct rb_node *node = root->rb_node;
|
|
|
|
struct alloc_stat key = { .ptr = ptr, .call_site = call_site };
|
|
|
|
|
|
|
|
while (node) {
|
|
|
|
struct alloc_stat *data;
|
|
|
|
int cmp;
|
|
|
|
|
|
|
|
data = rb_entry(node, struct alloc_stat, node);
|
|
|
|
|
|
|
|
cmp = sort_fn(&key, data);
|
|
|
|
if (cmp < 0)
|
|
|
|
node = node->rb_left;
|
|
|
|
else if (cmp > 0)
|
|
|
|
node = node->rb_right;
|
|
|
|
else
|
|
|
|
return data;
|
|
|
|
}
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2009-12-06 22:04:04 -07:00
|
|
|
static void process_free_event(void *data,
|
2009-11-23 22:26:55 -07:00
|
|
|
struct event *event,
|
|
|
|
int cpu,
|
2009-11-20 00:53:25 -07:00
|
|
|
u64 timestamp __used,
|
|
|
|
struct thread *thread __used)
|
|
|
|
{
|
2009-11-23 22:26:55 -07:00
|
|
|
unsigned long ptr;
|
|
|
|
struct alloc_stat *s_alloc, *s_caller;
|
|
|
|
|
2009-12-06 22:04:04 -07:00
|
|
|
ptr = raw_field_value(event, "ptr", data);
|
2009-11-23 22:26:55 -07:00
|
|
|
|
|
|
|
s_alloc = search_alloc_stat(ptr, 0, &root_alloc_stat, ptr_cmp);
|
|
|
|
if (!s_alloc)
|
|
|
|
return;
|
|
|
|
|
|
|
|
if (cpu != s_alloc->alloc_cpu) {
|
|
|
|
s_alloc->pingpong++;
|
|
|
|
|
|
|
|
s_caller = search_alloc_stat(0, s_alloc->call_site,
|
|
|
|
&root_caller_stat, callsite_cmp);
|
|
|
|
assert(s_caller);
|
|
|
|
s_caller->pingpong++;
|
|
|
|
}
|
|
|
|
s_alloc->alloc_cpu = -1;
|
2009-11-20 00:53:25 -07:00
|
|
|
}
|
|
|
|
|
2011-01-29 09:01:45 -07:00
|
|
|
/*
 * Dispatch a raw tracepoint sample to the matching kmem handler
 * (alloc, alloc_node or free).  Unknown event types are ignored.
 */
static void process_raw_event(union perf_event *raw_event __used, void *data,
			      int cpu, u64 timestamp, struct thread *thread)
{
	struct event *event;
	int type;

	type = trace_parse_common_type(data);
	event = trace_find_event(type);
	/* fix: guard against an unknown type before touching event->name */
	if (!event)
		return;

	if (!strcmp(event->name, "kmalloc") ||
	    !strcmp(event->name, "kmem_cache_alloc")) {
		process_alloc_event(data, event, cpu, timestamp, thread, 0);
		return;
	}

	if (!strcmp(event->name, "kmalloc_node") ||
	    !strcmp(event->name, "kmem_cache_alloc_node")) {
		process_alloc_event(data, event, cpu, timestamp, thread, 1);
		return;
	}

	if (!strcmp(event->name, "kfree") ||
	    !strcmp(event->name, "kmem_cache_free")) {
		process_free_event(data, event, cpu, timestamp, thread);
		return;
	}
}
|
|
|
|
|
2011-01-29 09:01:45 -07:00
|
|
|
static int process_sample_event(union perf_event *event,
|
|
|
|
struct perf_sample *sample,
|
perf session: Parse sample earlier
At perf_session__process_event, so that we reduce the number of lines in eache
tool sample processing routine that now receives a sample_data pointer already
parsed.
This will also be useful in the next patch, where we'll allow sample the
identity fields in MMAP, FORK, EXIT, etc, when it will be possible to see (cpu,
timestamp) just after before every event.
Also validate callchains in perf_session__process_event, i.e. as early as
possible, and keep a counter of the number of events discarded due to invalid
callchains, warning the user about it if it happens.
There is an assumption that was kept that all events have the same sample_type,
that will be dealt with in the future, when this preexisting limitation will be
removed.
Tested-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Ian Munsie <imunsie@au1.ibm.com>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Frédéric Weisbecker <fweisbec@gmail.com>
Cc: Ian Munsie <imunsie@au1.ibm.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Stephane Eranian <eranian@google.com>
LKML-Reference: <1291318772-30880-4-git-send-email-acme@infradead.org>
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2010-12-02 09:10:21 -07:00
|
|
|
struct perf_session *session)
|
2009-11-20 00:53:25 -07:00
|
|
|
{
|
perf session: Parse sample earlier
At perf_session__process_event, so that we reduce the number of lines in eache
tool sample processing routine that now receives a sample_data pointer already
parsed.
This will also be useful in the next patch, where we'll allow sample the
identity fields in MMAP, FORK, EXIT, etc, when it will be possible to see (cpu,
timestamp) just after before every event.
Also validate callchains in perf_session__process_event, i.e. as early as
possible, and keep a counter of the number of events discarded due to invalid
callchains, warning the user about it if it happens.
There is an assumption that was kept that all events have the same sample_type,
that will be dealt with in the future, when this preexisting limitation will be
removed.
Tested-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Ian Munsie <imunsie@au1.ibm.com>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Frédéric Weisbecker <fweisbec@gmail.com>
Cc: Ian Munsie <imunsie@au1.ibm.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Stephane Eranian <eranian@google.com>
LKML-Reference: <1291318772-30880-4-git-send-email-acme@infradead.org>
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2010-12-02 09:10:21 -07:00
|
|
|
struct thread *thread = perf_session__findnew(session, event->ip.pid);
|
2009-11-20 00:53:25 -07:00
|
|
|
|
|
|
|
if (thread == NULL) {
|
|
|
|
pr_debug("problem processing %d event, skipping it.\n",
|
|
|
|
event->header.type);
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
|
|
|
|
|
perf session: Parse sample earlier
At perf_session__process_event, so that we reduce the number of lines in eache
tool sample processing routine that now receives a sample_data pointer already
parsed.
This will also be useful in the next patch, where we'll allow sample the
identity fields in MMAP, FORK, EXIT, etc, when it will be possible to see (cpu,
timestamp) just after before every event.
Also validate callchains in perf_session__process_event, i.e. as early as
possible, and keep a counter of the number of events discarded due to invalid
callchains, warning the user about it if it happens.
There is an assumption that was kept that all events have the same sample_type,
that will be dealt with in the future, when this preexisting limitation will be
removed.
Tested-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Ian Munsie <imunsie@au1.ibm.com>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Frédéric Weisbecker <fweisbec@gmail.com>
Cc: Ian Munsie <imunsie@au1.ibm.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Stephane Eranian <eranian@google.com>
LKML-Reference: <1291318772-30880-4-git-send-email-acme@infradead.org>
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2010-12-02 09:10:21 -07:00
|
|
|
process_raw_event(event, sample->raw_data, sample->cpu,
|
|
|
|
sample->time, thread);
|
2009-11-20 00:53:25 -07:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2009-12-13 14:50:25 -07:00
|
|
|
static struct perf_event_ops event_ops = {
|
2010-04-23 15:34:53 -07:00
|
|
|
.sample = process_sample_event,
|
2011-01-29 09:01:45 -07:00
|
|
|
.comm = perf_event__process_comm,
|
2010-04-23 15:34:53 -07:00
|
|
|
.ordered_samples = true,
|
2009-11-20 00:53:25 -07:00
|
|
|
};
|
|
|
|
|
|
|
|
/*
 * Percentage of allocated bytes wasted on internal fragmentation:
 * 0.0 when nothing was allocated, otherwise 100 * (1 - req/alloc).
 */
static double fragmentation(unsigned long n_req, unsigned long n_alloc)
{
	return n_alloc == 0 ? 0.0 : 100.0 - (100.0 * n_req / n_alloc);
}
|
|
|
|
|
perf session: Move kmaps to perf_session
There is still some more work to do to disentangle map creation
from DSO loading, but this happens only for the kernel, and for
the early adopters of perf diff, where this disentanglement
matters most, we'll be testing different kernels, so no problem
here.
Further clarification: right now we create the kernel maps for
the various modules and discontiguous kernel text maps when
loading the DSO, we should do it as a two step process, first
creating the maps, for multiple mappings with the same DSO
store, then doing the dso load just once, for the first hit on
one of the maps sharing this DSO backing store.
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Frédéric Weisbecker <fweisbec@gmail.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
LKML-Reference: <1260741029-4430-6-git-send-email-acme@infradead.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-12-13 14:50:29 -07:00
|
|
|
static void __print_result(struct rb_root *root, struct perf_session *session,
|
|
|
|
int n_lines, int is_caller)
|
2009-11-20 00:53:25 -07:00
|
|
|
{
|
|
|
|
struct rb_node *next;
|
2010-04-27 17:17:50 -07:00
|
|
|
struct machine *machine;
|
2009-11-20 00:53:25 -07:00
|
|
|
|
2009-11-23 22:26:55 -07:00
|
|
|
printf("%.102s\n", graph_dotted_line);
|
|
|
|
printf(" %-34s |", is_caller ? "Callsite": "Alloc Ptr");
|
2010-01-19 10:23:23 -07:00
|
|
|
printf(" Total_alloc/Per | Total_req/Per | Hit | Ping-pong | Frag\n");
|
2009-11-23 22:26:55 -07:00
|
|
|
printf("%.102s\n", graph_dotted_line);
|
2009-11-20 00:53:25 -07:00
|
|
|
|
|
|
|
next = rb_first(root);
|
|
|
|
|
2010-04-27 17:17:50 -07:00
|
|
|
machine = perf_session__find_host_machine(session);
|
|
|
|
if (!machine) {
|
2010-04-18 22:32:50 -07:00
|
|
|
pr_err("__print_result: couldn't find kernel information\n");
|
|
|
|
return;
|
|
|
|
}
|
2009-11-20 00:53:25 -07:00
|
|
|
while (next && n_lines--) {
|
2009-11-23 12:51:09 -07:00
|
|
|
struct alloc_stat *data = rb_entry(next, struct alloc_stat,
|
|
|
|
node);
|
|
|
|
struct symbol *sym = NULL;
|
2010-04-01 17:24:38 -07:00
|
|
|
struct map *map;
|
2009-11-23 22:26:55 -07:00
|
|
|
char buf[BUFSIZ];
|
2009-11-23 12:51:09 -07:00
|
|
|
u64 addr;
|
|
|
|
|
|
|
|
if (is_caller) {
|
|
|
|
addr = data->call_site;
|
2009-11-23 22:25:48 -07:00
|
|
|
if (!raw_ip)
|
2010-04-29 11:25:23 -07:00
|
|
|
sym = machine__find_kernel_function(machine, addr, &map, NULL);
|
2009-11-23 12:51:09 -07:00
|
|
|
} else
|
|
|
|
addr = data->ptr;
|
|
|
|
|
|
|
|
if (sym != NULL)
|
2011-01-22 15:37:02 -07:00
|
|
|
snprintf(buf, sizeof(buf), "%s+%" PRIx64 "", sym->name,
|
2010-04-01 17:24:38 -07:00
|
|
|
addr - map->unmap_ip(map, sym->start));
|
2009-11-23 12:51:09 -07:00
|
|
|
else
|
2011-01-22 15:37:02 -07:00
|
|
|
snprintf(buf, sizeof(buf), "%#" PRIx64 "", addr);
|
2009-11-23 22:26:55 -07:00
|
|
|
printf(" %-34s |", buf);
|
2009-11-20 00:53:25 -07:00
|
|
|
|
2010-01-19 10:23:23 -07:00
|
|
|
printf(" %9llu/%-5lu | %9llu/%-5lu | %8lu | %8lu | %6.3f%%\n",
|
2009-11-23 22:26:55 -07:00
|
|
|
(unsigned long long)data->bytes_alloc,
|
2009-11-20 00:53:25 -07:00
|
|
|
(unsigned long)data->bytes_alloc / data->hit,
|
|
|
|
(unsigned long long)data->bytes_req,
|
|
|
|
(unsigned long)data->bytes_req / data->hit,
|
|
|
|
(unsigned long)data->hit,
|
2009-11-23 22:26:55 -07:00
|
|
|
(unsigned long)data->pingpong,
|
2009-11-20 00:53:25 -07:00
|
|
|
fragmentation(data->bytes_req, data->bytes_alloc));
|
|
|
|
|
|
|
|
next = rb_next(next);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (n_lines == -1)
|
2009-11-23 22:26:55 -07:00
|
|
|
printf(" ... | ... | ... | ... | ... | ... \n");
|
2009-11-20 00:53:25 -07:00
|
|
|
|
2009-11-23 22:26:55 -07:00
|
|
|
printf("%.102s\n", graph_dotted_line);
|
2009-11-20 00:53:25 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
static void print_summary(void)
|
|
|
|
{
|
|
|
|
printf("\nSUMMARY\n=======\n");
|
|
|
|
printf("Total bytes requested: %lu\n", total_requested);
|
|
|
|
printf("Total bytes allocated: %lu\n", total_allocated);
|
|
|
|
printf("Total bytes wasted on internal fragmentation: %lu\n",
|
|
|
|
total_allocated - total_requested);
|
|
|
|
printf("Internal fragmentation: %f%%\n",
|
|
|
|
fragmentation(total_requested, total_allocated));
|
2009-11-23 22:26:31 -07:00
|
|
|
printf("Cross CPU allocations: %lu/%lu\n", nr_cross_allocs, nr_allocs);
|
2009-11-20 00:53:25 -07:00
|
|
|
}
|
|
|
|
|
perf session: Move kmaps to perf_session
There is still some more work to do to disentangle map creation
from DSO loading, but this happens only for the kernel, and for
the early adopters of perf diff, where this disentanglement
matters most, we'll be testing different kernels, so no problem
here.
Further clarification: right now we create the kernel maps for
the various modules and discontiguous kernel text maps when
loading the DSO, we should do it as a two step process, first
creating the maps, for multiple mappings with the same DSO
store, then doing the dso load just once, for the first hit on
one of the maps sharing this DSO backing store.
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Frédéric Weisbecker <fweisbec@gmail.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
LKML-Reference: <1260741029-4430-6-git-send-email-acme@infradead.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-12-13 14:50:29 -07:00
|
|
|
static void print_result(struct perf_session *session)
|
2009-11-20 00:53:25 -07:00
|
|
|
{
|
|
|
|
if (caller_flag)
|
perf session: Move kmaps to perf_session
There is still some more work to do to disentangle map creation
from DSO loading, but this happens only for the kernel, and for
the early adopters of perf diff, where this disentanglement
matters most, we'll be testing different kernels, so no problem
here.
Further clarification: right now we create the kernel maps for
the various modules and discontiguous kernel text maps when
loading the DSO, we should do it as a two step process, first
creating the maps, for multiple mappings with the same DSO
store, then doing the dso load just once, for the first hit on
one of the maps sharing this DSO backing store.
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Frédéric Weisbecker <fweisbec@gmail.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
LKML-Reference: <1260741029-4430-6-git-send-email-acme@infradead.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-12-13 14:50:29 -07:00
|
|
|
__print_result(&root_caller_sorted, session, caller_lines, 1);
|
2009-11-20 00:53:25 -07:00
|
|
|
if (alloc_flag)
|
perf session: Move kmaps to perf_session
There is still some more work to do to disentangle map creation
from DSO loading, but this happens only for the kernel, and for
the early adopters of perf diff, where this disentanglement
matters most, we'll be testing different kernels, so no problem
here.
Further clarification: right now we create the kernel maps for
the various modules and discontiguous kernel text maps when
loading the DSO, we should do it as a two step process, first
creating the maps, for multiple mappings with the same DSO
store, then doing the dso load just once, for the first hit on
one of the maps sharing this DSO backing store.
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Frédéric Weisbecker <fweisbec@gmail.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
LKML-Reference: <1260741029-4430-6-git-send-email-acme@infradead.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-12-13 14:50:29 -07:00
|
|
|
__print_result(&root_alloc_sorted, session, alloc_lines, 0);
|
2009-11-20 00:53:25 -07:00
|
|
|
print_summary();
|
|
|
|
}
|
|
|
|
|
2009-11-23 22:26:10 -07:00
|
|
|
struct sort_dimension {
|
|
|
|
const char name[20];
|
|
|
|
sort_fn_t cmp;
|
|
|
|
struct list_head list;
|
|
|
|
};
|
|
|
|
|
|
|
|
static LIST_HEAD(caller_sort);
|
|
|
|
static LIST_HEAD(alloc_sort);
|
|
|
|
|
2009-11-20 00:53:25 -07:00
|
|
|
static void sort_insert(struct rb_root *root, struct alloc_stat *data,
|
2009-11-23 22:26:10 -07:00
|
|
|
struct list_head *sort_list)
|
2009-11-20 00:53:25 -07:00
|
|
|
{
|
|
|
|
struct rb_node **new = &(root->rb_node);
|
|
|
|
struct rb_node *parent = NULL;
|
2009-11-23 22:26:10 -07:00
|
|
|
struct sort_dimension *sort;
|
2009-11-20 00:53:25 -07:00
|
|
|
|
|
|
|
while (*new) {
|
|
|
|
struct alloc_stat *this;
|
2009-11-23 22:26:10 -07:00
|
|
|
int cmp = 0;
|
2009-11-20 00:53:25 -07:00
|
|
|
|
|
|
|
this = rb_entry(*new, struct alloc_stat, node);
|
|
|
|
parent = *new;
|
|
|
|
|
2009-11-23 22:26:10 -07:00
|
|
|
list_for_each_entry(sort, sort_list, list) {
|
|
|
|
cmp = sort->cmp(data, this);
|
|
|
|
if (cmp)
|
|
|
|
break;
|
|
|
|
}
|
2009-11-20 00:53:25 -07:00
|
|
|
|
|
|
|
if (cmp > 0)
|
|
|
|
new = &((*new)->rb_left);
|
|
|
|
else
|
|
|
|
new = &((*new)->rb_right);
|
|
|
|
}
|
|
|
|
|
|
|
|
rb_link_node(&data->node, parent, new);
|
|
|
|
rb_insert_color(&data->node, root);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void __sort_result(struct rb_root *root, struct rb_root *root_sorted,
|
2009-11-23 22:26:10 -07:00
|
|
|
struct list_head *sort_list)
|
2009-11-20 00:53:25 -07:00
|
|
|
{
|
|
|
|
struct rb_node *node;
|
|
|
|
struct alloc_stat *data;
|
|
|
|
|
|
|
|
for (;;) {
|
|
|
|
node = rb_first(root);
|
|
|
|
if (!node)
|
|
|
|
break;
|
|
|
|
|
|
|
|
rb_erase(node, root);
|
|
|
|
data = rb_entry(node, struct alloc_stat, node);
|
2009-11-23 22:26:10 -07:00
|
|
|
sort_insert(root_sorted, data, sort_list);
|
2009-11-20 00:53:25 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void sort_result(void)
|
|
|
|
{
|
2009-11-23 22:26:10 -07:00
|
|
|
__sort_result(&root_alloc_stat, &root_alloc_sorted, &alloc_sort);
|
|
|
|
__sort_result(&root_caller_stat, &root_caller_sorted, &caller_sort);
|
2009-11-20 00:53:25 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
static int __cmd_kmem(void)
|
|
|
|
{
|
2009-12-27 16:37:02 -07:00
|
|
|
int err = -EINVAL;
|
2010-12-09 20:09:16 -07:00
|
|
|
struct perf_session *session = perf_session__new(input_name, O_RDONLY,
|
|
|
|
0, false, &event_ops);
|
perf session: Move kmaps to perf_session
There is still some more work to do to disentangle map creation
from DSO loading, but this happens only for the kernel, and for
the early adopters of perf diff, where this disentanglement
matters most, we'll be testing different kernels, so no problem
here.
Further clarification: right now we create the kernel maps for
the various modules and discontiguous kernel text maps when
loading the DSO, we should do it as a two step process, first
creating the maps, for multiple mappings with the same DSO
store, then doing the dso load just once, for the first hit on
one of the maps sharing this DSO backing store.
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Frédéric Weisbecker <fweisbec@gmail.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
LKML-Reference: <1260741029-4430-6-git-send-email-acme@infradead.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-12-13 14:50:29 -07:00
|
|
|
if (session == NULL)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
2010-04-01 15:12:13 -07:00
|
|
|
if (perf_session__create_kernel_maps(session) < 0)
|
|
|
|
goto out_delete;
|
|
|
|
|
2009-12-27 16:37:02 -07:00
|
|
|
if (!perf_session__has_traces(session, "kmem record"))
|
|
|
|
goto out_delete;
|
|
|
|
|
2009-11-20 00:53:25 -07:00
|
|
|
setup_pager();
|
perf session: Move kmaps to perf_session
There is still some more work to do to disentangle map creation
from DSO loading, but this happens only for the kernel, and for
the early adopters of perf diff, where this disentanglement
matters most, we'll be testing different kernels, so no problem
here.
Further clarification: right now we create the kernel maps for
the various modules and discontiguous kernel text maps when
loading the DSO, we should do it as a two step process, first
creating the maps, for multiple mappings with the same DSO
store, then doing the dso load just once, for the first hit on
one of the maps sharing this DSO backing store.
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Frédéric Weisbecker <fweisbec@gmail.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
LKML-Reference: <1260741029-4430-6-git-send-email-acme@infradead.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-12-13 14:50:29 -07:00
|
|
|
err = perf_session__process_events(session, &event_ops);
|
|
|
|
if (err != 0)
|
|
|
|
goto out_delete;
|
2009-11-20 00:53:25 -07:00
|
|
|
sort_result();
|
perf session: Move kmaps to perf_session
There is still some more work to do to disentangle map creation
from DSO loading, but this happens only for the kernel, and for
the early adopters of perf diff, where this disentanglement
matters most, we'll be testing different kernels, so no problem
here.
Further clarification: right now we create the kernel maps for
the various modules and discontiguous kernel text maps when
loading the DSO, we should do it as a two step process, first
creating the maps, for multiple mappings with the same DSO
store, then doing the dso load just once, for the first hit on
one of the maps sharing this DSO backing store.
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Frédéric Weisbecker <fweisbec@gmail.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
LKML-Reference: <1260741029-4430-6-git-send-email-acme@infradead.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-12-13 14:50:29 -07:00
|
|
|
print_result(session);
|
|
|
|
out_delete:
|
|
|
|
perf_session__delete(session);
|
|
|
|
return err;
|
2009-11-20 00:53:25 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
static const char * const kmem_usage[] = {
|
2009-12-10 00:21:57 -07:00
|
|
|
"perf kmem [<options>] {record|stat}",
|
2009-11-20 00:53:25 -07:00
|
|
|
NULL
|
|
|
|
};
|
|
|
|
|
|
|
|
static int ptr_cmp(struct alloc_stat *l, struct alloc_stat *r)
|
|
|
|
{
|
|
|
|
if (l->ptr < r->ptr)
|
|
|
|
return -1;
|
|
|
|
else if (l->ptr > r->ptr)
|
|
|
|
return 1;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2009-11-23 22:26:10 -07:00
|
|
|
static struct sort_dimension ptr_sort_dimension = {
|
|
|
|
.name = "ptr",
|
|
|
|
.cmp = ptr_cmp,
|
|
|
|
};
|
|
|
|
|
2009-11-20 00:53:25 -07:00
|
|
|
static int callsite_cmp(struct alloc_stat *l, struct alloc_stat *r)
|
|
|
|
{
|
|
|
|
if (l->call_site < r->call_site)
|
|
|
|
return -1;
|
|
|
|
else if (l->call_site > r->call_site)
|
|
|
|
return 1;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2009-11-23 22:26:10 -07:00
|
|
|
static struct sort_dimension callsite_sort_dimension = {
|
|
|
|
.name = "callsite",
|
|
|
|
.cmp = callsite_cmp,
|
|
|
|
};
|
|
|
|
|
2009-11-22 02:58:00 -07:00
|
|
|
static int hit_cmp(struct alloc_stat *l, struct alloc_stat *r)
|
|
|
|
{
|
|
|
|
if (l->hit < r->hit)
|
|
|
|
return -1;
|
|
|
|
else if (l->hit > r->hit)
|
|
|
|
return 1;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2009-11-23 22:26:10 -07:00
|
|
|
static struct sort_dimension hit_sort_dimension = {
|
|
|
|
.name = "hit",
|
|
|
|
.cmp = hit_cmp,
|
|
|
|
};
|
|
|
|
|
2009-11-20 00:53:25 -07:00
|
|
|
static int bytes_cmp(struct alloc_stat *l, struct alloc_stat *r)
|
|
|
|
{
|
|
|
|
if (l->bytes_alloc < r->bytes_alloc)
|
|
|
|
return -1;
|
|
|
|
else if (l->bytes_alloc > r->bytes_alloc)
|
|
|
|
return 1;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2009-11-23 22:26:10 -07:00
|
|
|
static struct sort_dimension bytes_sort_dimension = {
|
|
|
|
.name = "bytes",
|
|
|
|
.cmp = bytes_cmp,
|
|
|
|
};
|
|
|
|
|
2009-11-22 02:58:00 -07:00
|
|
|
static int frag_cmp(struct alloc_stat *l, struct alloc_stat *r)
|
|
|
|
{
|
|
|
|
double x, y;
|
|
|
|
|
|
|
|
x = fragmentation(l->bytes_req, l->bytes_alloc);
|
|
|
|
y = fragmentation(r->bytes_req, r->bytes_alloc);
|
|
|
|
|
|
|
|
if (x < y)
|
|
|
|
return -1;
|
|
|
|
else if (x > y)
|
|
|
|
return 1;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2009-11-23 22:26:10 -07:00
|
|
|
/* --sort key "frag": order records by fragmentation ratio. */
static struct sort_dimension frag_sort_dimension = {
	.name = "frag",
	.cmp = frag_cmp,
};
|
|
|
|
|
2009-11-23 22:26:55 -07:00
|
|
|
static int pingpong_cmp(struct alloc_stat *l, struct alloc_stat *r)
|
|
|
|
{
|
|
|
|
if (l->pingpong < r->pingpong)
|
|
|
|
return -1;
|
|
|
|
else if (l->pingpong > r->pingpong)
|
|
|
|
return 1;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* --sort key "pingpong": order records by pingpong count. */
static struct sort_dimension pingpong_sort_dimension = {
	.name = "pingpong",
	.cmp = pingpong_cmp,
};
|
|
|
|
|
2009-11-23 22:26:10 -07:00
|
|
|
/* All sort dimensions selectable through --sort. */
static struct sort_dimension *avail_sorts[] = {
	&ptr_sort_dimension,
	&callsite_sort_dimension,
	&hit_sort_dimension,
	&bytes_sort_dimension,
	&frag_sort_dimension,
	&pingpong_sort_dimension,
};
|
|
|
|
|
|
|
|
/*
 * Number of entries in avail_sorts[].  Use ARRAY_SIZE (already used by
 * __cmd_record below) instead of an open-coded sizeof division.
 */
#define NUM_AVAIL_SORTS	((int)ARRAY_SIZE(avail_sorts))
|
|
|
|
|
|
|
|
static int sort_dimension__add(const char *tok, struct list_head *list)
|
|
|
|
{
|
|
|
|
struct sort_dimension *sort;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0; i < NUM_AVAIL_SORTS; i++) {
|
|
|
|
if (!strcmp(avail_sorts[i]->name, tok)) {
|
|
|
|
sort = malloc(sizeof(*sort));
|
|
|
|
if (!sort)
|
|
|
|
die("malloc");
|
|
|
|
memcpy(sort, avail_sorts[i], sizeof(*sort));
|
|
|
|
list_add_tail(&sort->list, list);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Parse the comma-separated sort-key string @arg and append the
 * matching sort dimensions to @sort_list in the order given.
 * Returns 0 on success; prints an error and returns -1 on an unknown
 * key.  Dies on allocation failure.
 *
 * Fix: strsep() advances its pointer argument (to NULL at the end of
 * the string), so the original free(str) freed NULL and leaked the
 * strdup()ed buffer; the error path leaked it as well.  Keep the
 * original pointer in @str and iterate with a separate cursor.
 */
static int setup_sorting(struct list_head *sort_list, const char *arg)
{
	char *tok;
	char *str = strdup(arg);
	char *pos = str;	/* cursor for strsep(); str stays valid for free() */

	if (!str)
		die("strdup");

	while (true) {
		tok = strsep(&pos, ",");
		if (!tok)
			break;
		if (sort_dimension__add(tok, sort_list) < 0) {
			error("Unknown --sort key: '%s'", tok);
			free(str);
			return -1;
		}
	}

	free(str);
	return 0;
}
|
|
|
|
|
2009-11-20 00:53:25 -07:00
|
|
|
/*
 * --sort callback: apply the key list to whichever view (--caller or
 * --alloc) was requested most recently; the flag with the larger value
 * is the one set last.  Returns setup_sorting()'s result, or -1 when
 * no argument was supplied.
 *
 * Fix: dropped the unreachable trailing "return 0;" — both branches
 * of the if/else already return.
 */
static int parse_sort_opt(const struct option *opt __used,
			  const char *arg, int unset __used)
{
	if (!arg)
		return -1;

	if (caller_flag > alloc_flag)
		return setup_sorting(&caller_sort, arg);

	return setup_sorting(&alloc_sort, arg);
}
|
|
|
|
|
2009-12-10 00:21:57 -07:00
|
|
|
/*
 * --caller callback: request per-callsite statistics.  caller_flag and
 * alloc_flag record the order the two options appeared in; the larger
 * value marks the view configured last, which later callbacks
 * (parse_sort_opt, parse_line_opt) act on.
 */
static int parse_caller_opt(const struct option *opt __used,
			  const char *arg __used, int unset __used)
{
	caller_flag = (alloc_flag + 1);
	return 0;
}
|
2009-11-20 00:53:25 -07:00
|
|
|
|
2009-12-10 00:21:57 -07:00
|
|
|
/*
 * --alloc callback: request per-allocation statistics.  Mirror of
 * parse_caller_opt: makes alloc_flag the larger of the two flags so
 * later callbacks configure the allocation view.
 */
static int parse_alloc_opt(const struct option *opt __used,
			  const char *arg __used, int unset __used)
{
	alloc_flag = (caller_flag + 1);
	return 0;
}
|
|
|
|
|
|
|
|
/*
 * --line callback: limit how many result lines are printed for the
 * view selected most recently (--caller or --alloc).
 *
 * Fix: the original assigned strtoul()'s result without validation, so
 * a non-numeric argument silently became 0 lines.  Validate with
 * strtol()/endptr and reject bad input (parse_options reports the
 * failure to the user).
 */
static int parse_line_opt(const struct option *opt __used,
			  const char *arg, int unset __used)
{
	char *end;
	long lines;

	if (!arg)
		return -1;

	lines = strtol(arg, &end, 10);
	if (end == arg || *end != '\0')
		return -1;	/* not a plain decimal number */

	if (caller_flag > alloc_flag)
		caller_lines = lines;
	else
		alloc_lines = lines;

	return 0;
}
|
|
|
|
|
|
|
|
static const struct option kmem_options[] = {
|
|
|
|
OPT_STRING('i', "input", &input_name, "file",
|
|
|
|
"input file name"),
|
2009-12-10 00:21:57 -07:00
|
|
|
OPT_CALLBACK_NOOPT(0, "caller", NULL, NULL,
|
|
|
|
"show per-callsite statistics",
|
|
|
|
parse_caller_opt),
|
|
|
|
OPT_CALLBACK_NOOPT(0, "alloc", NULL, NULL,
|
|
|
|
"show per-allocation statistics",
|
|
|
|
parse_alloc_opt),
|
2009-11-23 22:26:10 -07:00
|
|
|
OPT_CALLBACK('s', "sort", NULL, "key[,key2...]",
|
2009-11-23 22:26:55 -07:00
|
|
|
"sort by keys: ptr, call_site, bytes, hit, pingpong, frag",
|
2009-11-20 00:53:25 -07:00
|
|
|
parse_sort_opt),
|
|
|
|
OPT_CALLBACK('l', "line", NULL, "num",
|
2009-12-10 00:21:57 -07:00
|
|
|
"show n lines",
|
2009-11-20 00:53:25 -07:00
|
|
|
parse_line_opt),
|
2009-11-23 22:25:48 -07:00
|
|
|
OPT_BOOLEAN(0, "raw-ip", &raw_ip, "show raw ip instead of symbol"),
|
2009-11-20 00:53:25 -07:00
|
|
|
OPT_END()
|
|
|
|
};
|
|
|
|
|
|
|
|
/*
 * Fixed argument vector prepended when "perf kmem record" forwards to
 * "perf record": system-wide tracing of the kmem allocator
 * tracepoints, one sample per event (-c 1).
 */
static const char *record_args[] = {
	"record",
	"-a",
	"-R",
	"-f",
	"-c", "1",
	"-e", "kmem:kmalloc",
	"-e", "kmem:kmalloc_node",
	"-e", "kmem:kfree",
	"-e", "kmem:kmem_cache_alloc",
	"-e", "kmem:kmem_cache_alloc_node",
	"-e", "kmem:kmem_cache_free",
};
|
|
|
|
|
|
|
|
static int __cmd_record(int argc, const char **argv)
|
|
|
|
{
|
|
|
|
unsigned int rec_argc, i, j;
|
|
|
|
const char **rec_argv;
|
|
|
|
|
|
|
|
rec_argc = ARRAY_SIZE(record_args) + argc - 1;
|
|
|
|
rec_argv = calloc(rec_argc + 1, sizeof(char *));
|
|
|
|
|
2010-11-12 19:35:06 -07:00
|
|
|
if (rec_argv == NULL)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
2009-11-20 00:53:25 -07:00
|
|
|
for (i = 0; i < ARRAY_SIZE(record_args); i++)
|
|
|
|
rec_argv[i] = strdup(record_args[i]);
|
|
|
|
|
|
|
|
for (j = 1; j < (unsigned int)argc; j++, i++)
|
|
|
|
rec_argv[i] = argv[j];
|
|
|
|
|
|
|
|
return cmd_record(i, rec_argv, NULL);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Entry point for "perf kmem": parse options, then dispatch to the
 * "record" or "stat" sub-command; anything else prints usage.
 */
int cmd_kmem(int argc, const char **argv, const char *prefix __used)
{
	argc = parse_options(argc, argv, kmem_options, kmem_usage, 0);

	if (!argc)
		usage_with_options(kmem_usage, kmem_options);

	symbol__init();

	if (!strncmp(argv[0], "rec", 3))
		return __cmd_record(argc, argv);

	if (strcmp(argv[0], "stat")) {
		/* unknown sub-command */
		usage_with_options(kmem_usage, kmem_options);
		return 0;
	}

	setup_cpunode_map();

	/* fall back to the default sort order where none was given */
	if (list_empty(&caller_sort))
		setup_sorting(&caller_sort, default_sort_order);
	if (list_empty(&alloc_sort))
		setup_sorting(&alloc_sort, default_sort_order);

	return __cmd_kmem();
}
|
|
|
|
|