1

bcachefs: Improve btree write buffer tracepoints

- add a tracepoint for write_buffer_flush_sync; this is expensive
- fix the write_buffer_flush_slowpath tracepoint

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
This commit is contained in:
Kent Overstreet 2023-11-02 22:31:16 -04:00
parent c259bd95d1
commit 56db242951
3 changed files with 34 additions and 11 deletions

View File

@ -1578,7 +1578,9 @@ struct bch_sb_field_disk_groups {
x(write_super, 73) \ x(write_super, 73) \
x(trans_restart_would_deadlock_recursion_limit, 74) \ x(trans_restart_would_deadlock_recursion_limit, 74) \
x(trans_restart_write_buffer_flush, 75) \ x(trans_restart_write_buffer_flush, 75) \
x(trans_restart_split_race, 76) x(trans_restart_split_race, 76) \
x(write_buffer_flush_slowpath, 77) \
x(write_buffer_flush_sync, 78)
enum bch_persistent_counters { enum bch_persistent_counters {
#define x(t, n, ...) BCH_COUNTER_##t, #define x(t, n, ...) BCH_COUNTER_##t,

View File

@ -241,7 +241,7 @@ out:
mutex_unlock(&wb->flush_lock); mutex_unlock(&wb->flush_lock);
return ret; return ret;
slowpath: slowpath:
trace_write_buffer_flush_slowpath(trans, i - keys, nr); trace_and_count(c, write_buffer_flush_slowpath, trans, slowpath, nr);
/* /*
* Now sort the rest by journal seq and bump the journal pin as we go. * Now sort the rest by journal seq and bump the journal pin as we go.
@ -277,8 +277,12 @@ slowpath:
int bch2_btree_write_buffer_flush_sync(struct btree_trans *trans) int bch2_btree_write_buffer_flush_sync(struct btree_trans *trans)
{ {
struct bch_fs *c = trans->c;
trace_and_count(c, write_buffer_flush_sync, trans, _RET_IP_);
bch2_trans_unlock(trans); bch2_trans_unlock(trans);
mutex_lock(&trans->c->btree_write_buffer.flush_lock); mutex_lock(&c->btree_write_buffer.flush_lock);
return __bch2_btree_write_buffer_flush(trans, 0, true); return __bch2_btree_write_buffer_flush(trans, 0, true);
} }

View File

@ -1334,21 +1334,38 @@ TRACE_EVENT(write_buffer_flush,
__entry->nr, __entry->size, __entry->skipped, __entry->fast) __entry->nr, __entry->size, __entry->skipped, __entry->fast)
); );
/*
 * Tracepoint for synchronous write buffer flushes: records the
 * requesting transaction's name (trans->fn) and the caller's return
 * address, so expensive sync flushes can be attributed to a call site.
 */
TRACE_EVENT(write_buffer_flush_sync,
	TP_PROTO(struct btree_trans *trans, unsigned long caller_ip),
	TP_ARGS(trans, caller_ip),

	TP_STRUCT__entry(
		__array(char,			trans_fn, 32	)
		__field(unsigned long,		caller_ip	)
	),

	TP_fast_assign(
		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
		__entry->caller_ip		= caller_ip;
	),

	TP_printk("%s %pS", __entry->trans_fn, (void *) __entry->caller_ip)
);
/*
 * Tracepoint for the write buffer flush slowpath: reports how many keys
 * took the slowpath out of the total number being flushed.
 */
TRACE_EVENT(write_buffer_flush_slowpath,
	TP_PROTO(struct btree_trans *trans, size_t slowpath, size_t total),
	TP_ARGS(trans, slowpath, total),

	TP_STRUCT__entry(
		__field(size_t,		slowpath	)
		__field(size_t,		total		)
	),

	TP_fast_assign(
		__entry->slowpath	= slowpath;
		__entry->total		= total;
	),

	TP_printk("%zu/%zu", __entry->slowpath, __entry->total)
);
DEFINE_EVENT(str, rebalance_extent, DEFINE_EVENT(str, rebalance_extent,