bcachefs: Better bucket alloc tracepoints

Tracepoints are garbage, and perf trace even cuts off some of our
fields.

Much nicer to just trace a string, and then we can build nicely
formatted output with printbufs.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Kent Overstreet 2024-05-04 16:46:29 -04:00
parent 6202569777
commit 40574946b8
2 changed files with 51 additions and 111 deletions
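
The new events reuse bcachefs's string-payload fs_str event class (its definition is not part of this diff): the tracepoint records a single preformatted string, so nothing is cut down to fit fixed-width fields. As a rough, illustrative sketch of what a string-payload event class looks like (the names below are hypothetical, not the actual fs_str definition):

/*
 * Illustrative sketch only, not the fs_str class from the bcachefs trace
 * header: a string-payload event stores one variable-length string via
 * __string()/__assign_str(), so the caller-formatted output is emitted
 * verbatim instead of being squeezed into fixed-width fields.
 */
DECLARE_EVENT_CLASS(str_event,			/* hypothetical class name */
	TP_PROTO(const char *str),
	TP_ARGS(str),

	TP_STRUCT__entry(
		__string(str, str)		/* dynamically sized string */
	),

	TP_fast_assign(
		__assign_str(str, str);		/* newer kernels: __assign_str(str) */
	),

	TP_printk("%s", __get_str(str))
);

DEFINE_EVENT(str_event, my_string_event,	/* hypothetical event name */
	TP_PROTO(const char *str),
	TP_ARGS(str)
);

bcachefs's real fs_str class additionally takes a struct bch_fs *, presumably so each event also records which filesystem it came from; that is why the DEFINE_EVENTs added below use the (c, str) prototype.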


@@ -511,6 +511,44 @@ again:
	return ob;
}

static noinline void trace_bucket_alloc2(struct bch_fs *c, struct bch_dev *ca,
					 enum bch_watermark watermark,
					 enum bch_data_type data_type,
					 struct closure *cl,
					 struct bch_dev_usage *usage,
					 struct bucket_alloc_state *s,
					 struct open_bucket *ob)
{
	struct printbuf buf = PRINTBUF;

	printbuf_tabstop_push(&buf, 24);

	prt_printf(&buf, "dev\t%s (%u)\n", ca->name, ca->dev_idx);
	prt_printf(&buf, "watermark\t%s\n", bch2_watermarks[watermark]);
	prt_printf(&buf, "data type\t%s\n", __bch2_data_types[data_type]);
	prt_printf(&buf, "blocking\t%u\n", cl != NULL);
	prt_printf(&buf, "free\t%llu\n", usage->d[BCH_DATA_free].buckets);
	prt_printf(&buf, "avail\t%llu\n", dev_buckets_free(ca, *usage, watermark));
	prt_printf(&buf, "copygc_wait\t%lu/%lli\n",
		   bch2_copygc_wait_amount(c),
		   c->copygc_wait - atomic64_read(&c->io_clock[WRITE].now));
	prt_printf(&buf, "seen\t%llu\n", s->buckets_seen);
	prt_printf(&buf, "open\t%llu\n", s->skipped_open);
	prt_printf(&buf, "need journal commit\t%llu\n", s->skipped_need_journal_commit);
	prt_printf(&buf, "nocow\t%llu\n", s->skipped_nocow);
	prt_printf(&buf, "nouse\t%llu\n", s->skipped_nouse);

	if (!IS_ERR(ob)) {
		prt_printf(&buf, "allocated\t%llu\n", ob->bucket);
		trace_bucket_alloc(c, buf.buf);
	} else {
		prt_printf(&buf, "err\t%s\n", bch2_err_str(PTR_ERR(ob)));
		trace_bucket_alloc_fail(c, buf.buf);
	}

	printbuf_exit(&buf);
}

/**
 * bch2_bucket_alloc_trans - allocate a single bucket from a specific device
 * @trans:	transaction object
@@ -583,27 +621,14 @@ err:
		ob->data_type = data_type;

	if (!IS_ERR(ob))
		trace_and_count(c, bucket_alloc, ca,
				bch2_watermarks[watermark],
				ob->bucket,
				usage->d[BCH_DATA_free].buckets,
				avail,
				bch2_copygc_wait_amount(c),
				c->copygc_wait - atomic64_read(&c->io_clock[WRITE].now),
				&s,
				cl == NULL,
				"");
		count_event(c, bucket_alloc);
	else if (!bch2_err_matches(PTR_ERR(ob), BCH_ERR_transaction_restart))
		trace_and_count(c, bucket_alloc_fail, ca,
				bch2_watermarks[watermark],
				0,
				usage->d[BCH_DATA_free].buckets,
				avail,
				bch2_copygc_wait_amount(c),
				c->copygc_wait - atomic64_read(&c->io_clock[WRITE].now),
				&s,
				cl == NULL,
				bch2_err_str(PTR_ERR(ob)));
		count_event(c, bucket_alloc_fail);

	if (!IS_ERR(ob)
	    ? trace_bucket_alloc_enabled()
	    : trace_bucket_alloc_fail_enabled())
		trace_bucket_alloc2(c, ca, watermark, data_type, cl, usage, &s, ob);

	return ob;
}
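
For reference, the printbuf_tabstop_push(&buf, 24) call above makes '\t' in the format strings pad out to a 24-column tabstop, so each "name\tvalue\n" pair comes out as an aligned two-column line. A minimal sketch of that pattern in isolation (the helper name and the values are placeholders, not real allocator output):

/* Illustrative sketch only, not part of the commit. */
static void bucket_alloc_trace_demo(struct bch_fs *c)
{
	struct printbuf buf = PRINTBUF;

	printbuf_tabstop_push(&buf, 24);		/* '\t' pads to the 24-column tabstop */

	prt_printf(&buf, "watermark\t%s\n", "normal");	/* placeholder values */
	prt_printf(&buf, "free\t%llu\n", 123456ULL);

	/*
	 * buf.buf now reads roughly:
	 *
	 *   watermark               normal
	 *   free                    123456
	 */
	trace_bucket_alloc(c, buf.buf);
	printbuf_exit(&buf);
}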


@@ -638,99 +638,14 @@ DEFINE_EVENT(bch_fs, gc_gens_end,

/* Allocator */

DECLARE_EVENT_CLASS(bucket_alloc,
	TP_PROTO(struct bch_dev *ca, const char *alloc_reserve,
		 u64 bucket,
		 u64 free,
		 u64 avail,
		 u64 copygc_wait_amount,
		 s64 copygc_waiting_for,
		 struct bucket_alloc_state *s,
		 bool nonblocking,
		 const char *err),
	TP_ARGS(ca, alloc_reserve, bucket, free, avail,
		copygc_wait_amount, copygc_waiting_for,
		s, nonblocking, err),

	TP_STRUCT__entry(
		__field(u8,		dev			)
		__array(char,		reserve,	16	)
		__field(u64,		bucket			)
		__field(u64,		free			)
		__field(u64,		avail			)
		__field(u64,		copygc_wait_amount	)
		__field(s64,		copygc_waiting_for	)
		__field(u64,		seen			)
		__field(u64,		open			)
		__field(u64,		need_journal_commit	)
		__field(u64,		nouse			)
		__field(bool,		nonblocking		)
		__field(u64,		nocow			)
		__array(char,		err,		32	)
	),

	TP_fast_assign(
		__entry->dev			= ca->dev_idx;
		strscpy(__entry->reserve, alloc_reserve, sizeof(__entry->reserve));
		__entry->bucket			= bucket;
		__entry->free			= free;
		__entry->avail			= avail;
		__entry->copygc_wait_amount	= copygc_wait_amount;
		__entry->copygc_waiting_for	= copygc_waiting_for;
		__entry->seen			= s->buckets_seen;
		__entry->open			= s->skipped_open;
		__entry->need_journal_commit	= s->skipped_need_journal_commit;
		__entry->nouse			= s->skipped_nouse;
		__entry->nonblocking		= nonblocking;
		__entry->nocow			= s->skipped_nocow;
		strscpy(__entry->err, err, sizeof(__entry->err));
	),

	TP_printk("reserve %s bucket %u:%llu free %llu avail %llu copygc_wait %llu/%lli seen %llu open %llu need_journal_commit %llu nouse %llu nocow %llu nonblocking %u err %s",
		  __entry->reserve,
		  __entry->dev,
		  __entry->bucket,
		  __entry->free,
		  __entry->avail,
		  __entry->copygc_wait_amount,
		  __entry->copygc_waiting_for,
		  __entry->seen,
		  __entry->open,
		  __entry->need_journal_commit,
		  __entry->nouse,
		  __entry->nocow,
		  __entry->nonblocking,
		  __entry->err)

DEFINE_EVENT(fs_str, bucket_alloc,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);

DEFINE_EVENT(bucket_alloc, bucket_alloc,
	TP_PROTO(struct bch_dev *ca, const char *alloc_reserve,
		 u64 bucket,
		 u64 free,
		 u64 avail,
		 u64 copygc_wait_amount,
		 s64 copygc_waiting_for,
		 struct bucket_alloc_state *s,
		 bool nonblocking,
		 const char *err),
	TP_ARGS(ca, alloc_reserve, bucket, free, avail,
		copygc_wait_amount, copygc_waiting_for,
		s, nonblocking, err)
);

DEFINE_EVENT(bucket_alloc, bucket_alloc_fail,
	TP_PROTO(struct bch_dev *ca, const char *alloc_reserve,
		 u64 bucket,
		 u64 free,
		 u64 avail,
		 u64 copygc_wait_amount,
		 s64 copygc_waiting_for,
		 struct bucket_alloc_state *s,
		 bool nonblocking,
		 const char *err),
	TP_ARGS(ca, alloc_reserve, bucket, free, avail,
		copygc_wait_amount, copygc_waiting_for,
		s, nonblocking, err)

DEFINE_EVENT(fs_str, bucket_alloc_fail,
	TP_PROTO(struct bch_fs *c, const char *str),
	TP_ARGS(c, str)
);
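
One more note on the caller side: trace_bucket_alloc2() is noinline and only called behind the trace_bucket_alloc_enabled() / trace_bucket_alloc_fail_enabled() checks, presumably so the printbuf formatting stays off the allocation fast path when the events are disabled. A hedged sketch of that guard pattern for a single event (the helper name is hypothetical):

/*
 * Illustrative sketch only: trace_<name>_enabled() is generated for every
 * tracepoint and compiles down to a static-branch check, so the string is
 * only formatted when the event is actually enabled.
 */
static void trace_bucket_alloc_maybe(struct bch_fs *c)	/* hypothetical helper */
{
	struct printbuf buf = PRINTBUF;	/* nothing allocated yet, so the early return below is leak-free */

	if (!trace_bucket_alloc_enabled())
		return;

	prt_printf(&buf, "only formatted when someone is listening\n");
	trace_bucket_alloc(c, buf.buf);
	printbuf_exit(&buf);
}
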
TRACE_EVENT(discard_buckets,