perf threads: Switch from rbtree to hashmap
The rbtree keeps entries sorted, but nothing uses the ordering. Switch
to a hashmap for O(1) rather than O(log n) find/insert/remove
complexity.

Signed-off-by: Ian Rogers <irogers@google.com>
Acked-by: Namhyung Kim <namhyung@kernel.org>
Cc: Yang Jihong <yangjihong1@huawei.com>
Cc: Oliver Upton <oliver.upton@linux.dev>
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Link: https://lore.kernel.org/r/20240301053646.1449657-7-irogers@google.com
This commit is contained in:
parent 93bb5b0d93
commit 412a2ff473
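The hashmap used here is the tools-internal one (tools/perf/util/hashmap.h, kept in sync with tools/lib/bpf/hashmap.h): a chained hash table over long-sized keys and values with caller-supplied hash and equality callbacks. Below is a minimal standalone sketch of the calls the patch relies on; it is illustrative only, and assumes an LP64 target so a char * fits the map's long-sized values:

	#include <stdio.h>
	#include "hashmap.h"	/* tools/perf/util/hashmap.h */

	/* Identity hash/equality over integer keys, mirroring key_hash() and
	 * key_equal() in the patch below. */
	static size_t id_hash(long key, void *ctx) { (void)ctx; return key; }
	static bool id_equal(long a, long b, void *ctx) { (void)ctx; return a == b; }

	int main(void)
	{
		struct hashmap map;
		char *val;

		hashmap__init(&map, id_hash, id_equal, /*ctx=*/NULL);

		/* hashmap__add() fails if the key is already present;
		 * threads__findnew() below uses exactly that to detect an
		 * entry added by a racing caller. */
		if (hashmap__add(&map, 42, "tid 42"))
			return 1;

		/* hashmap__find() returns true and fills the out-param on a hit. */
		if (hashmap__find(&map, 42, &val))
			printf("found: %s\n", val);

		/* hashmap__delete() can hand back the evicted value, which is
		 * how the patch recovers the thread to thread__put(). */
		hashmap__delete(&map, 42, /*old_key=*/NULL, &val);
		printf("%zu entries left\n", hashmap__size(&map));

		hashmap__clear(&map);
		return 0;
	}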
--- a/tools/perf/util/threads.c
+++ b/tools/perf/util/threads.c
@@ -3,25 +3,30 @@
 #include "machine.h"
 #include "thread.h"
 
-struct thread_rb_node {
-	struct rb_node rb_node;
-	struct thread *thread;
-};
-
 static struct threads_table_entry *threads__table(struct threads *threads, pid_t tid)
 {
 	/* Cast it to handle tid == -1 */
 	return &threads->table[(unsigned int)tid % THREADS__TABLE_SIZE];
 }
 
+static size_t key_hash(long key, void *ctx __maybe_unused)
+{
+	/* The table lookup removes low bit entropy, but this is just ignored here. */
+	return key;
+}
+
+static bool key_equal(long key1, long key2, void *ctx __maybe_unused)
+{
+	return key1 == key2;
+}
+
 void threads__init(struct threads *threads)
 {
 	for (int i = 0; i < THREADS__TABLE_SIZE; i++) {
 		struct threads_table_entry *table = &threads->table[i];
 
-		table->entries = RB_ROOT_CACHED;
+		hashmap__init(&table->shard, key_hash, key_equal, NULL);
 		init_rwsem(&table->lock);
-		table->nr = 0;
 		table->last_match = NULL;
 	}
 }
@@ -32,6 +37,7 @@ void threads__exit(struct threads *threads)
 	for (int i = 0; i < THREADS__TABLE_SIZE; i++) {
 		struct threads_table_entry *table = &threads->table[i];
 
+		hashmap__clear(&table->shard);
 		exit_rwsem(&table->lock);
 	}
 }
@@ -44,7 +50,7 @@ size_t threads__nr(struct threads *threads)
 		struct threads_table_entry *table = &threads->table[i];
 
 		down_read(&table->lock);
-		nr += table->nr;
+		nr += hashmap__size(&table->shard);
 		up_read(&table->lock);
 	}
 	return nr;
@@ -86,28 +92,13 @@ static void __threads_table_entry__set_last_match(struct threads_table_entry *table,
 struct thread *threads__find(struct threads *threads, pid_t tid)
 {
 	struct threads_table_entry *table = threads__table(threads, tid);
-	struct rb_node **p;
-	struct thread *res = NULL;
+	struct thread *res;
 
 	down_read(&table->lock);
 	res = __threads_table_entry__get_last_match(table, tid);
-	if (res)
-		return res;
-
-	p = &table->entries.rb_root.rb_node;
-	while (*p != NULL) {
-		struct rb_node *parent = *p;
-		struct thread *th = rb_entry(parent, struct thread_rb_node, rb_node)->thread;
-
-		if (thread__tid(th) == tid) {
-			res = thread__get(th);
-			break;
-		}
-
-		if (tid < thread__tid(th))
-			p = &(*p)->rb_left;
-		else
-			p = &(*p)->rb_right;
+	if (!res) {
+		if (hashmap__find(&table->shard, tid, &res))
+			res = thread__get(res);
 	}
 	up_read(&table->lock);
 	if (res)
@@ -118,49 +109,25 @@ struct thread *threads__find(struct threads *threads, pid_t tid)
 struct thread *threads__findnew(struct threads *threads, pid_t pid, pid_t tid, bool *created)
 {
 	struct threads_table_entry *table = threads__table(threads, tid);
-	struct rb_node **p;
-	struct rb_node *parent = NULL;
 	struct thread *res = NULL;
-	struct thread_rb_node *nd;
-	bool leftmost = true;
 
 	*created = false;
 	down_write(&table->lock);
-	p = &table->entries.rb_root.rb_node;
-	while (*p != NULL) {
-		struct thread *th;
-
-		parent = *p;
-		th = rb_entry(parent, struct thread_rb_node, rb_node)->thread;
-
-		if (thread__tid(th) == tid) {
-			__threads_table_entry__set_last_match(table, th);
-			res = thread__get(th);
-			goto out_unlock;
-		}
-
-		if (tid < thread__tid(th))
-			p = &(*p)->rb_left;
-		else {
-			leftmost = false;
-			p = &(*p)->rb_right;
-		}
-	}
-	nd = malloc(sizeof(*nd));
-	if (nd == NULL)
-		goto out_unlock;
 	res = thread__new(pid, tid);
-	if (!res)
-		free(nd);
-	else {
-		*created = true;
-		nd->thread = thread__get(res);
-		rb_link_node(&nd->rb_node, parent, p);
-		rb_insert_color_cached(&nd->rb_node, &table->entries, leftmost);
-		++table->nr;
-		__threads_table_entry__set_last_match(table, res);
+	if (res) {
+		if (hashmap__add(&table->shard, tid, res)) {
+			/* Add failed. Assume a race so find other entry. */
+			thread__put(res);
+			res = NULL;
+			if (hashmap__find(&table->shard, tid, &res))
+				res = thread__get(res);
+		} else {
+			res = thread__get(res);
+			*created = true;
+		}
+		if (res)
+			__threads_table_entry__set_last_match(table, res);
 	}
-out_unlock:
 	up_write(&table->lock);
 	return res;
 }
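Note the shape of the new threads__findnew(): rather than search first and insert on a miss, it creates the thread and tries hashmap__add() straight away, treating a failed add as "another caller inserted this tid between our earlier lookup and now". Distilled into a hypothetical helper (the name and factoring are illustrative, not part of the patch; it assumes the declarations from tools/perf/util/threads.h and thread.h):

	/* Caller holds table->lock for writing. */
	static struct thread *shard__add_or_find(struct threads_table_entry *table,
						 pid_t tid, struct thread *fresh)
	{
		struct thread *res;

		if (hashmap__add(&table->shard, tid, fresh)) {
			/* Add failed: the tid is already present. Drop the
			 * new thread and take a reference on the winner. */
			thread__put(fresh);
			res = NULL;
			if (hashmap__find(&table->shard, tid, &res))
				res = thread__get(res);
		} else {
			/* The map owns one reference; hand back another. */
			res = thread__get(fresh);
		}
		return res;
	}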
@@ -169,57 +136,32 @@ void threads__remove_all_threads(struct threads *threads)
 {
 	for (int i = 0; i < THREADS__TABLE_SIZE; i++) {
 		struct threads_table_entry *table = &threads->table[i];
-		struct rb_node *nd;
+		struct hashmap_entry *cur, *tmp;
+		size_t bkt;
 
 		down_write(&table->lock);
 		__threads_table_entry__set_last_match(table, NULL);
-		nd = rb_first_cached(&table->entries);
-		while (nd) {
-			struct thread_rb_node *trb = rb_entry(nd, struct thread_rb_node, rb_node);
-
-			nd = rb_next(nd);
-			thread__put(trb->thread);
-			rb_erase_cached(&trb->rb_node, &table->entries);
-			RB_CLEAR_NODE(&trb->rb_node);
-			--table->nr;
-
-			free(trb);
+		hashmap__for_each_entry_safe((&table->shard), cur, tmp, bkt) {
+			struct thread *old_value;
+
+			hashmap__delete(&table->shard, cur->key, /*old_key=*/NULL, &old_value);
+			thread__put(old_value);
 		}
-		assert(table->nr == 0);
 		up_write(&table->lock);
 	}
 }
 
 void threads__remove(struct threads *threads, struct thread *thread)
 {
-	struct rb_node **p;
 	struct threads_table_entry *table = threads__table(threads, thread__tid(thread));
-	pid_t tid = thread__tid(thread);
+	struct thread *old_value;
 
 	down_write(&table->lock);
 	if (table->last_match && RC_CHK_EQUAL(table->last_match, thread))
 		__threads_table_entry__set_last_match(table, NULL);
 
-	p = &table->entries.rb_root.rb_node;
-	while (*p != NULL) {
-		struct rb_node *parent = *p;
-		struct thread_rb_node *nd = rb_entry(parent, struct thread_rb_node, rb_node);
-		struct thread *th = nd->thread;
-
-		if (RC_CHK_EQUAL(th, thread)) {
-			thread__put(nd->thread);
-			rb_erase_cached(&nd->rb_node, &table->entries);
-			RB_CLEAR_NODE(&nd->rb_node);
-			--table->nr;
-			free(nd);
-			break;
-		}
-
-		if (tid < thread__tid(th))
-			p = &(*p)->rb_left;
-		else
-			p = &(*p)->rb_right;
-	}
+	hashmap__delete(&table->shard, thread__tid(thread), /*old_key=*/NULL, &old_value);
+	thread__put(old_value);
 	up_write(&table->lock);
 }
 
@@ -229,12 +171,12 @@ int threads__for_each_thread(struct threads *threads,
 {
 	for (int i = 0; i < THREADS__TABLE_SIZE; i++) {
 		struct threads_table_entry *table = &threads->table[i];
-		struct rb_node *nd;
+		struct hashmap_entry *cur;
+		size_t bkt;
 
 		down_read(&table->lock);
-		for (nd = rb_first_cached(&table->entries); nd; nd = rb_next(nd)) {
-			struct thread_rb_node *trb = rb_entry(nd, struct thread_rb_node, rb_node);
-			int rc = fn(trb->thread, data);
+		hashmap__for_each_entry((&table->shard), cur, bkt) {
+			int rc = fn((struct thread *)cur->pvalue, data);
 
 			if (rc != 0) {
 				up_read(&table->lock);
--- a/tools/perf/util/threads.h
+++ b/tools/perf/util/threads.h
@@ -2,7 +2,7 @@
 #ifndef __PERF_THREADS_H
 #define __PERF_THREADS_H
 
-#include <linux/rbtree.h>
+#include "hashmap.h"
 #include "rwsem.h"
 
 struct thread;
@@ -11,9 +11,9 @@ struct thread;
 #define THREADS__TABLE_SIZE (1 << THREADS__TABLE_BITS)
 
 struct threads_table_entry {
-	struct rb_root_cached entries;
+	/* Key is tid, value is struct thread. */
+	struct hashmap shard;
 	struct rw_semaphore lock;
-	unsigned int nr;
 	struct thread *last_match;
 };
 
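Callers are untouched by the switch; iteration still goes through threads__for_each_thread(). A usage sketch follows — the full prototype is cut off in the hunk header above, so the callback signature here is inferred from the fn((struct thread *)cur->pvalue, data) call and should be treated as an assumption:

	#include "util/debug.h"
	#include "util/thread.h"
	#include "util/threads.h"

	/* Iteration callback: returning non-zero would stop
	 * threads__for_each_thread() early, per the loop body above. */
	static int count_cb(struct thread *thread, void *data)
	{
		size_t *n = data;

		pr_debug("saw tid %d\n", thread__tid(thread));
		(*n)++;
		return 0;
	}

	static size_t count_threads(struct threads *threads)
	{
		size_t n = 0;

		threads__for_each_thread(threads, count_cb, &n);
		return n;
	}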