perf threads: Move threads to its own files

Move threads out of machine and into its own file.

Signed-off-by: Ian Rogers <irogers@google.com>
Acked-by: Namhyung Kim <namhyung@kernel.org>
Cc: Yang Jihong <yangjihong1@huawei.com>
Cc: Oliver Upton <oliver.upton@linux.dev>
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Link: https://lore.kernel.org/r/20240301053646.1449657-6-irogers@google.com
Ian Rogers, 2024-02-29 21:36:43 -08:00 (committed by Namhyung Kim)
parent d436f90a64
commit 93bb5b0d93
5 changed files with 285 additions and 273 deletions

tools/perf/util/Build

@@ -72,6 +72,7 @@ perf-y += ordered-events.o
perf-y += namespaces.o
perf-y += comm.o
perf-y += thread.o
perf-y += threads.o
perf-y += thread_map.o
perf-y += parse-events-flex.o
perf-y += parse-events-bison.o

tools/perf/util/machine.c

@@ -43,17 +43,6 @@
#include <linux/string.h>
#include <linux/zalloc.h>
struct thread_rb_node {
struct rb_node rb_node;
struct thread *thread;
};
static struct threads_table_entry *threads__table(struct threads *threads, pid_t tid)
{
/* Cast it to handle tid == -1 */
return &threads->table[(unsigned int)tid % THREADS__TABLE_SIZE];
}
static struct dso *machine__kernel_dso(struct machine *machine)
{
return map__dso(machine->vmlinux_map);
@@ -66,18 +55,6 @@ static void dsos__init(struct dsos *dsos)
init_rwsem(&dsos->lock);
}
void threads__init(struct threads *threads)
{
for (int i = 0; i < THREADS__TABLE_SIZE; i++) {
struct threads_table_entry *table = &threads->table[i];
table->entries = RB_ROOT_CACHED;
init_rwsem(&table->lock);
table->nr = 0;
table->last_match = NULL;
}
}
static int machine__set_mmap_name(struct machine *machine)
{
if (machine__is_host(machine))
@@ -210,49 +187,11 @@ static void dsos__exit(struct dsos *dsos)
exit_rwsem(&dsos->lock);
}
static void __threads_table_entry__set_last_match(struct threads_table_entry *table,
struct thread *th);
void threads__remove_all_threads(struct threads *threads)
{
for (int i = 0; i < THREADS__TABLE_SIZE; i++) {
struct threads_table_entry *table = &threads->table[i];
struct rb_node *nd;
down_write(&table->lock);
__threads_table_entry__set_last_match(table, NULL);
nd = rb_first_cached(&table->entries);
while (nd) {
struct thread_rb_node *trb = rb_entry(nd, struct thread_rb_node, rb_node);
nd = rb_next(nd);
thread__put(trb->thread);
rb_erase_cached(&trb->rb_node, &table->entries);
RB_CLEAR_NODE(&trb->rb_node);
--table->nr;
free(trb);
}
assert(table->nr == 0);
up_write(&table->lock);
}
}
void machine__delete_threads(struct machine *machine)
{
threads__remove_all_threads(&machine->threads);
}
void threads__exit(struct threads *threads)
{
threads__remove_all_threads(threads);
for (int i = 0; i < THREADS__TABLE_SIZE; i++) {
struct threads_table_entry *table = &threads->table[i];
exit_rwsem(&table->lock);
}
}
void machine__exit(struct machine *machine)
{
if (machine == NULL)
@@ -568,121 +507,6 @@ out_err:
goto out_put;
}
/*
* Front-end cache - TID lookups come in blocks,
* so most of the time we dont have to look up
* the full rbtree:
*/
static struct thread *__threads_table_entry__get_last_match(struct threads_table_entry *table,
pid_t tid)
{
struct thread *th, *res = NULL;
th = table->last_match;
if (th != NULL) {
if (thread__tid(th) == tid)
res = thread__get(th);
}
return res;
}
static void __threads_table_entry__set_last_match(struct threads_table_entry *table,
struct thread *th)
{
thread__put(table->last_match);
table->last_match = thread__get(th);
}
static void threads_table_entry__set_last_match(struct threads_table_entry *table,
struct thread *th)
{
down_write(&table->lock);
__threads_table_entry__set_last_match(table, th);
up_write(&table->lock);
}
struct thread *threads__find(struct threads *threads, pid_t tid)
{
struct threads_table_entry *table = threads__table(threads, tid);
struct rb_node **p;
struct thread *res = NULL;
down_read(&table->lock);
res = __threads_table_entry__get_last_match(table, tid);
if (res)
return res;
p = &table->entries.rb_root.rb_node;
while (*p != NULL) {
struct rb_node *parent = *p;
struct thread *th = rb_entry(parent, struct thread_rb_node, rb_node)->thread;
if (thread__tid(th) == tid) {
res = thread__get(th);
break;
}
if (tid < thread__tid(th))
p = &(*p)->rb_left;
else
p = &(*p)->rb_right;
}
up_read(&table->lock);
if (res)
threads_table_entry__set_last_match(table, res);
return res;
}
struct thread *threads__findnew(struct threads *threads, pid_t pid, pid_t tid, bool *created)
{
struct threads_table_entry *table = threads__table(threads, tid);
struct rb_node **p;
struct rb_node *parent = NULL;
struct thread *res = NULL;
struct thread_rb_node *nd;
bool leftmost = true;
*created = false;
down_write(&table->lock);
p = &table->entries.rb_root.rb_node;
while (*p != NULL) {
struct thread *th;
parent = *p;
th = rb_entry(parent, struct thread_rb_node, rb_node)->thread;
if (thread__tid(th) == tid) {
__threads_table_entry__set_last_match(table, th);
res = thread__get(th);
goto out_unlock;
}
if (tid < thread__tid(th))
p = &(*p)->rb_left;
else {
p = &(*p)->rb_right;
leftmost = false;
}
}
nd = malloc(sizeof(*nd));
if (nd == NULL)
goto out_unlock;
res = thread__new(pid, tid);
if (!res)
free(nd);
else {
*created = true;
nd->thread = thread__get(res);
rb_link_node(&nd->rb_node, parent, p);
rb_insert_color_cached(&nd->rb_node, &table->entries, leftmost);
++table->nr;
__threads_table_entry__set_last_match(table, res);
}
out_unlock:
up_write(&table->lock);
return res;
}
/*
* Caller must eventually drop thread->refcnt returned with a successful
* lookup/new thread inserted.
@@ -699,7 +523,6 @@ static struct thread *__machine__findnew_thread(struct machine *machine,
machine__update_thread_pid(machine, th, pid);
return th;
}
if (!create)
return NULL;
@@ -1147,20 +970,6 @@ static int machine_fprintf_cb(struct thread *thread, void *data)
return 0;
}
size_t threads__nr(struct threads *threads)
{
size_t nr = 0;
for (int i = 0; i < THREADS__TABLE_SIZE; i++) {
struct threads_table_entry *table = &threads->table[i];
down_read(&table->lock);
nr += table->nr;
up_read(&table->lock);
}
return nr;
}
size_t machine__fprintf(struct machine *machine, FILE *fp)
{
struct machine_fprintf_cb_args args = {
@@ -2093,39 +1902,6 @@ out_problem:
return 0;
}
void threads__remove(struct threads *threads, struct thread *thread)
{
struct rb_node **p;
struct threads_table_entry *table = threads__table(threads, thread__tid(thread));
pid_t tid = thread__tid(thread);
down_write(&table->lock);
if (table->last_match && RC_CHK_EQUAL(table->last_match, thread))
__threads_table_entry__set_last_match(table, NULL);
p = &table->entries.rb_root.rb_node;
while (*p != NULL) {
struct rb_node *parent = *p;
struct thread_rb_node *nd = rb_entry(parent, struct thread_rb_node, rb_node);
struct thread *th = nd->thread;
if (RC_CHK_EQUAL(th, thread)) {
thread__put(nd->thread);
rb_erase_cached(&nd->rb_node, &table->entries);
RB_CLEAR_NODE(&nd->rb_node);
--table->nr;
free(nd);
break;
}
if (tid < thread__tid(th))
p = &(*p)->rb_left;
else
p = &(*p)->rb_right;
}
up_write(&table->lock);
}
void machine__remove_thread(struct machine *machine, struct thread *th)
{
return threads__remove(&machine->threads, th);
@@ -3258,30 +3034,6 @@ int thread__resolve_callchain(struct thread *thread,
return ret;
}
int threads__for_each_thread(struct threads *threads,
int (*fn)(struct thread *thread, void *data),
void *data)
{
for (int i = 0; i < THREADS__TABLE_SIZE; i++) {
struct threads_table_entry *table = &threads->table[i];
struct rb_node *nd;
down_read(&table->lock);
for (nd = rb_first_cached(&table->entries); nd; nd = rb_next(nd)) {
struct thread_rb_node *trb = rb_entry(nd, struct thread_rb_node, rb_node);
int rc = fn(trb->thread, data);
if (rc != 0) {
up_read(&table->lock);
return rc;
}
}
up_read(&table->lock);
}
return 0;
}
int machine__for_each_thread(struct machine *machine,
int (*fn)(struct thread *thread, void *p),
void *priv)
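
A side note on threads__table(), moved verbatim in the hunks above: a tid is hashed into one of THREADS__TABLE_SIZE (1 << 8 = 256) buckets, and the cast to unsigned int is what keeps tid == -1 inside the table instead of producing a negative index. A standalone sketch of the arithmetic (illustrative, not part of the patch):

#include <stdio.h>
#include <sys/types.h>

#define THREADS__TABLE_BITS 8
#define THREADS__TABLE_SIZE (1 << THREADS__TABLE_BITS)

int main(void)
{
	pid_t tids[] = { 0, 1, 255, 256, -1 };

	for (size_t i = 0; i < sizeof(tids) / sizeof(tids[0]); i++) {
		/* Same expression as threads__table(): casting to unsigned
		 * turns tid == -1 into UINT_MAX, which lands in bucket 255.
		 * With signed arithmetic, -1 % 256 would be -1 in C99+. */
		unsigned int bucket = (unsigned int)tids[i] % THREADS__TABLE_SIZE;

		printf("tid %d -> bucket %u\n", (int)tids[i], bucket);
	}
	return 0;
}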

tools/perf/util/machine.h

@@ -7,6 +7,7 @@
#include "maps.h"
#include "dsos.h"
#include "rwsem.h"
#include "threads.h"
struct addr_location;
struct branch_stack;
@@ -28,31 +29,6 @@ extern const char *ref_reloc_sym_names[];
struct vdso_info;
#define THREADS__TABLE_BITS 8
#define THREADS__TABLE_SIZE (1 << THREADS__TABLE_BITS)
struct threads_table_entry {
struct rb_root_cached entries;
struct rw_semaphore lock;
unsigned int nr;
struct thread *last_match;
};
struct threads {
struct threads_table_entry table[THREADS__TABLE_SIZE];
};
void threads__init(struct threads *threads);
void threads__exit(struct threads *threads);
size_t threads__nr(struct threads *threads);
struct thread *threads__find(struct threads *threads, pid_t tid);
struct thread *threads__findnew(struct threads *threads, pid_t pid, pid_t tid, bool *created);
void threads__remove_all_threads(struct threads *threads);
void threads__remove(struct threads *threads, struct thread *thread);
int threads__for_each_thread(struct threads *threads,
int (*fn)(struct thread *thread, void *data),
void *data);
struct machine {
struct rb_node rb_node;
pid_t pid;

tools/perf/util/threads.c (new file, 248 additions)

@@ -0,0 +1,248 @@
// SPDX-License-Identifier: GPL-2.0
#include "threads.h"
#include "machine.h"
#include "thread.h"
struct thread_rb_node {
struct rb_node rb_node;
struct thread *thread;
};
static struct threads_table_entry *threads__table(struct threads *threads, pid_t tid)
{
/* Cast it to handle tid == -1 */
return &threads->table[(unsigned int)tid % THREADS__TABLE_SIZE];
}
void threads__init(struct threads *threads)
{
for (int i = 0; i < THREADS__TABLE_SIZE; i++) {
struct threads_table_entry *table = &threads->table[i];
table->entries = RB_ROOT_CACHED;
init_rwsem(&table->lock);
table->nr = 0;
table->last_match = NULL;
}
}
void threads__exit(struct threads *threads)
{
threads__remove_all_threads(threads);
for (int i = 0; i < THREADS__TABLE_SIZE; i++) {
struct threads_table_entry *table = &threads->table[i];
exit_rwsem(&table->lock);
}
}
size_t threads__nr(struct threads *threads)
{
size_t nr = 0;
for (int i = 0; i < THREADS__TABLE_SIZE; i++) {
struct threads_table_entry *table = &threads->table[i];
down_read(&table->lock);
nr += table->nr;
up_read(&table->lock);
}
return nr;
}
/*
* Front-end cache - TID lookups come in blocks,
* so most of the time we dont have to look up
* the full rbtree:
*/
static struct thread *__threads_table_entry__get_last_match(struct threads_table_entry *table,
pid_t tid)
{
struct thread *th, *res = NULL;
th = table->last_match;
if (th != NULL) {
if (thread__tid(th) == tid)
res = thread__get(th);
}
return res;
}
static void __threads_table_entry__set_last_match(struct threads_table_entry *table,
struct thread *th)
{
thread__put(table->last_match);
table->last_match = thread__get(th);
}
static void threads_table_entry__set_last_match(struct threads_table_entry *table,
struct thread *th)
{
down_write(&table->lock);
__threads_table_entry__set_last_match(table, th);
up_write(&table->lock);
}
struct thread *threads__find(struct threads *threads, pid_t tid)
{
struct threads_table_entry *table = threads__table(threads, tid);
struct rb_node **p;
struct thread *res = NULL;
down_read(&table->lock);
res = __threads_table_entry__get_last_match(table, tid);
if (res)
return res;
p = &table->entries.rb_root.rb_node;
while (*p != NULL) {
struct rb_node *parent = *p;
struct thread *th = rb_entry(parent, struct thread_rb_node, rb_node)->thread;
if (thread__tid(th) == tid) {
res = thread__get(th);
break;
}
if (tid < thread__tid(th))
p = &(*p)->rb_left;
else
p = &(*p)->rb_right;
}
up_read(&table->lock);
if (res)
threads_table_entry__set_last_match(table, res);
return res;
}
struct thread *threads__findnew(struct threads *threads, pid_t pid, pid_t tid, bool *created)
{
struct threads_table_entry *table = threads__table(threads, tid);
struct rb_node **p;
struct rb_node *parent = NULL;
struct thread *res = NULL;
struct thread_rb_node *nd;
bool leftmost = true;
*created = false;
down_write(&table->lock);
p = &table->entries.rb_root.rb_node;
while (*p != NULL) {
struct thread *th;
parent = *p;
th = rb_entry(parent, struct thread_rb_node, rb_node)->thread;
if (thread__tid(th) == tid) {
__threads_table_entry__set_last_match(table, th);
res = thread__get(th);
goto out_unlock;
}
if (tid < thread__tid(th))
p = &(*p)->rb_left;
else {
leftmost = false;
p = &(*p)->rb_right;
}
}
nd = malloc(sizeof(*nd));
if (nd == NULL)
goto out_unlock;
res = thread__new(pid, tid);
if (!res)
free(nd);
else {
*created = true;
nd->thread = thread__get(res);
rb_link_node(&nd->rb_node, parent, p);
rb_insert_color_cached(&nd->rb_node, &table->entries, leftmost);
++table->nr;
__threads_table_entry__set_last_match(table, res);
}
out_unlock:
up_write(&table->lock);
return res;
}
void threads__remove_all_threads(struct threads *threads)
{
for (int i = 0; i < THREADS__TABLE_SIZE; i++) {
struct threads_table_entry *table = &threads->table[i];
struct rb_node *nd;
down_write(&table->lock);
__threads_table_entry__set_last_match(table, NULL);
nd = rb_first_cached(&table->entries);
while (nd) {
struct thread_rb_node *trb = rb_entry(nd, struct thread_rb_node, rb_node);
nd = rb_next(nd);
thread__put(trb->thread);
rb_erase_cached(&trb->rb_node, &table->entries);
RB_CLEAR_NODE(&trb->rb_node);
--table->nr;
free(trb);
}
assert(table->nr == 0);
up_write(&table->lock);
}
}
void threads__remove(struct threads *threads, struct thread *thread)
{
struct rb_node **p;
struct threads_table_entry *table = threads__table(threads, thread__tid(thread));
pid_t tid = thread__tid(thread);
down_write(&table->lock);
if (table->last_match && RC_CHK_EQUAL(table->last_match, thread))
__threads_table_entry__set_last_match(table, NULL);
p = &table->entries.rb_root.rb_node;
while (*p != NULL) {
struct rb_node *parent = *p;
struct thread_rb_node *nd = rb_entry(parent, struct thread_rb_node, rb_node);
struct thread *th = nd->thread;
if (RC_CHK_EQUAL(th, thread)) {
thread__put(nd->thread);
rb_erase_cached(&nd->rb_node, &table->entries);
RB_CLEAR_NODE(&nd->rb_node);
--table->nr;
free(nd);
break;
}
if (tid < thread__tid(th))
p = &(*p)->rb_left;
else
p = &(*p)->rb_right;
}
up_write(&table->lock);
}
int threads__for_each_thread(struct threads *threads,
int (*fn)(struct thread *thread, void *data),
void *data)
{
for (int i = 0; i < THREADS__TABLE_SIZE; i++) {
struct threads_table_entry *table = &threads->table[i];
struct rb_node *nd;
down_read(&table->lock);
for (nd = rb_first_cached(&table->entries); nd; nd = rb_next(nd)) {
struct thread_rb_node *trb = rb_entry(nd, struct thread_rb_node, rb_node);
int rc = fn(trb->thread, data);
if (rc != 0) {
up_read(&table->lock);
return rc;
}
}
up_read(&table->lock);
}
return 0;
}
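
Note that threads__for_each_thread() holds each bucket's lock for reading while it walks the rbtree, so the callback should not mutate the table (calling back into threads__remove() would try to take the same lock for writing and can deadlock); defer removals until after the walk. A minimal callback sketch, assuming perf's internal headers (illustrative, not part of the patch):

#include <stddef.h>
#include <sys/types.h>
#include "threads.h"
#include "thread.h"

/* 'data' carries the threshold in and the match count out. */
struct tid_filter {
	pid_t min_tid;
	size_t matches;
};

static int count_high_tids(struct thread *thread, void *data)
{
	struct tid_filter *f = data;

	if (thread__tid(thread) >= f->min_tid)
		f->matches++;
	return 0;	/* returning non-zero stops the iteration early */
}

static size_t count_tids_from(struct threads *threads, pid_t min_tid)
{
	struct tid_filter f = { .min_tid = min_tid, .matches = 0 };

	threads__for_each_thread(threads, count_high_tids, &f);
	return f.matches;
}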

tools/perf/util/threads.h (new file, 35 additions)

@@ -0,0 +1,35 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __PERF_THREADS_H
#define __PERF_THREADS_H
#include <linux/rbtree.h>
#include "rwsem.h"
struct thread;
#define THREADS__TABLE_BITS 8
#define THREADS__TABLE_SIZE (1 << THREADS__TABLE_BITS)
struct threads_table_entry {
struct rb_root_cached entries;
struct rw_semaphore lock;
unsigned int nr;
struct thread *last_match;
};
struct threads {
struct threads_table_entry table[THREADS__TABLE_SIZE];
};
void threads__init(struct threads *threads);
void threads__exit(struct threads *threads);
size_t threads__nr(struct threads *threads);
struct thread *threads__find(struct threads *threads, pid_t tid);
struct thread *threads__findnew(struct threads *threads, pid_t pid, pid_t tid, bool *created);
void threads__remove_all_threads(struct threads *threads);
void threads__remove(struct threads *threads, struct thread *thread);
int threads__for_each_thread(struct threads *threads,
int (*fn)(struct thread *thread, void *data),
void *data);
#endif /* __PERF_THREADS_H */
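
For reference, a minimal lifecycle sketch of the API this header declares, assuming perf's internal headers (illustrative, not part of the patch):

#include <stdbool.h>
#include <stdio.h>
#include "threads.h"
#include "thread.h"

static void threads_example(void)
{
	struct threads threads;
	struct thread *thread;
	bool created;

	threads__init(&threads);

	/* Find or create pid 42 / tid 42; both threads__find() and
	 * threads__findnew() return a reference the caller must drop. */
	thread = threads__findnew(&threads, 42, 42, &created);
	if (thread) {
		printf("tid %d (%s)\n", thread__tid(thread),
		       created ? "created" : "found");
		thread__put(thread);
	}

	printf("%zu thread(s) in the table\n", threads__nr(&threads));

	/* threads__exit() removes any remaining threads itself. */
	threads__exit(&threads);
}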