
bcachefs: Option improvements

This adds flags for options that must be a power of two (block size and
btree node size), and options that are stored in the superblock as a
power of two (encoded extent max).

Also, options are now stored in memory in the same units they're
displayed in (bytes); we now convert when getting and setting them from
the superblock.

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Kent Overstreet 2021-12-14 14:24:41 -05:00 committed by Kent Overstreet
parent d05117e36a
commit 8244f3209b
17 changed files with 205 additions and 122 deletions
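The core of the change is the conversion between the superblock encoding and the in-memory (byte) value of an option. The following is a minimal standalone sketch of that conversion, not the in-tree code: the flag values and the shift/ilog2 logic are taken from the patch below (bch2_opts_from_sb() and __bch2_opt_set_sb()), while the helper names are purely illustrative.

/*
 * Sketch: options live in memory in bytes; some superblock fields store the
 * same value shifted down to sectors (OPT_SB_FIELD_SECTORS) or as a log2
 * (OPT_SB_FIELD_ILOG2), so reads and writes of the superblock convert.
 */
#include <stdint.h>

#define OPT_SB_FIELD_SECTORS    (1 << 8)   /* sb field is >> 9 of actual value */
#define OPT_SB_FIELD_ILOG2      (1 << 9)   /* sb field is ilog2 of actual value */

static uint64_t opt_from_sb_field(unsigned flags, uint64_t v)
{
        if (flags & OPT_SB_FIELD_ILOG2)         /* superblock stores log2 */
                v = 1ULL << v;
        if (flags & OPT_SB_FIELD_SECTORS)       /* superblock stores sectors */
                v <<= 9;
        return v;                               /* in-memory value, in bytes */
}

static uint64_t opt_to_sb_field(unsigned flags, uint64_t v)
{
        if (flags & OPT_SB_FIELD_SECTORS)
                v >>= 9;
        if (flags & OPT_SB_FIELD_ILOG2)
                v = 63 - __builtin_clzll(v);    /* ilog2() equivalent, v > 0 */
        return v;
}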

View File

@@ -928,10 +928,20 @@ static inline unsigned bucket_bytes(const struct bch_dev *ca)
 
 static inline unsigned block_bytes(const struct bch_fs *c)
 {
-        return c->opts.block_size << 9;
+        return c->opts.block_size;
 }
 
-static inline struct timespec64 bch2_time_to_timespec(struct bch_fs *c, s64 time)
+static inline unsigned block_sectors(const struct bch_fs *c)
+{
+        return c->opts.block_size >> 9;
+}
+
+static inline size_t btree_sectors(const struct bch_fs *c)
+{
+        return c->opts.btree_node_size >> 9;
+}
+
+static inline struct timespec64 bch2_time_to_timespec(const struct bch_fs *c, s64 time)
 {
         struct timespec64 t;
         s32 rem;
@@ -943,13 +953,13 @@ static inline struct timespec64 bch2_time_to_timespec(struct bch_fs *c, s64 time
         return t;
 }
 
-static inline s64 timespec_to_bch2_time(struct bch_fs *c, struct timespec64 ts)
+static inline s64 timespec_to_bch2_time(const struct bch_fs *c, struct timespec64 ts)
 {
         return (ts.tv_sec * c->sb.time_units_per_sec +
                 (int) ts.tv_nsec / c->sb.nsec_per_time_unit) - c->sb.time_base_lo;
 }
 
-static inline s64 bch2_current_time(struct bch_fs *c)
+static inline s64 bch2_current_time(const struct bch_fs *c)
 {
         struct timespec64 now;

View File

@@ -69,7 +69,7 @@ static inline bool btree_node_hashed(struct btree *b)
 
 static inline size_t btree_bytes(struct bch_fs *c)
 {
-        return c->opts.btree_node_size << 9;
+        return c->opts.btree_node_size;
 }
 
 static inline size_t btree_max_u64s(struct bch_fs *c)
@@ -84,7 +84,7 @@ static inline size_t btree_pages(struct bch_fs *c)
 
 static inline unsigned btree_blocks(struct bch_fs *c)
 {
-        return c->opts.btree_node_size >> c->block_bits;
+        return btree_sectors(c) >> c->block_bits;
 }
 
 #define BTREE_SPLIT_THRESHOLD(c)        (btree_max_u64s(c) * 2 / 3)

View File

@@ -682,7 +682,7 @@ static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
                          BTREE_ERR_FATAL, c, ca, b, i,
                          "BSET_SEPARATE_WHITEOUTS no longer supported");
 
-        if (btree_err_on(offset + sectors > c->opts.btree_node_size,
+        if (btree_err_on(offset + sectors > btree_sectors(c),
                          BTREE_ERR_FIXABLE, c, ca, b, i,
                          "bset past end of btree node")) {
                 i->u64s = 0;
@@ -896,7 +896,7 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
                              b->data->keys.seq, bp->seq);
         }
 
-        while (b->written < (ptr_written ?: c->opts.btree_node_size)) {
+        while (b->written < (ptr_written ?: btree_sectors(c))) {
                 unsigned sectors, whiteout_u64s = 0;
                 struct nonce nonce;
                 struct bch_csum csum;
@@ -1204,7 +1204,7 @@ static unsigned btree_node_sectors_written(struct bch_fs *c, void *data)
         if (le64_to_cpu(bn->magic) != bset_magic(c))
                 return 0;
 
-        while (offset < c->opts.btree_node_size) {
+        while (offset < btree_sectors(c)) {
                 if (!offset) {
                         offset += vstruct_sectors(bn, c->block_bits);
                 } else {
@@ -1226,7 +1226,7 @@ static bool btree_node_has_extra_bsets(struct bch_fs *c, unsigned offset, void *
         if (!offset)
                 return false;
 
-        while (offset < c->opts.btree_node_size) {
+        while (offset < btree_sectors(c)) {
                 bne = data + (offset << 9);
                 if (bne->keys.seq == bn->keys.seq)
                         return true;
@@ -1296,7 +1296,7 @@ fsck_err:
                 if (ra->err[i])
                         continue;
 
-                while (offset < c->opts.btree_node_size) {
+                while (offset < btree_sectors(c)) {
                         if (!offset) {
                                 sectors = vstruct_sectors(bn, c->block_bits);
                         } else {
@@ -1313,7 +1313,7 @@ fsck_err:
                         offset += sectors;
                 }
 
-                while (offset < c->opts.btree_node_size) {
+                while (offset < btree_sectors(c)) {
                         bne = ra->buf[i] + (offset << 9);
                         if (bne->keys.seq == bn->keys.seq) {
                                 if (!gap)
@@ -1793,8 +1793,8 @@ do_write:
         BUG_ON(btree_node_fake(b));
         BUG_ON((b->will_make_reachable != 0) != !b->written);
 
-        BUG_ON(b->written >= c->opts.btree_node_size);
-        BUG_ON(b->written & (c->opts.block_size - 1));
+        BUG_ON(b->written >= btree_sectors(c));
+        BUG_ON(b->written & (block_sectors(c) - 1));
         BUG_ON(bset_written(b, btree_bset_last(b)));
         BUG_ON(le64_to_cpu(b->data->magic) != bset_magic(c));
         BUG_ON(memcmp(&b->data->format, &b->format, sizeof(b->format)));
@@ -1867,7 +1867,7 @@ do_write:
         memset(data + bytes_to_write, 0,
                (sectors_to_write << 9) - bytes_to_write);
 
-        BUG_ON(b->written + sectors_to_write > c->opts.btree_node_size);
+        BUG_ON(b->written + sectors_to_write > btree_sectors(c));
         BUG_ON(BSET_BIG_ENDIAN(i) != CPU_BIG_ENDIAN);
         BUG_ON(i->seq != b->data->keys.seq);

View File

@@ -223,12 +223,12 @@ retry:
         if (IS_ERR(wp))
                 return ERR_CAST(wp);
 
-        if (wp->sectors_free < c->opts.btree_node_size) {
+        if (wp->sectors_free < btree_sectors(c)) {
                 struct open_bucket *ob;
                 unsigned i;
 
                 open_bucket_for_each(c, &wp->ptrs, ob, i)
-                        if (ob->sectors_free < c->opts.btree_node_size)
+                        if (ob->sectors_free < btree_sectors(c))
                                 ob->sectors_free = 0;
 
                 bch2_alloc_sectors_done(c, wp);
@@ -236,7 +236,7 @@ retry:
         }
 
         bkey_btree_ptr_v2_init(&tmp.k);
-        bch2_alloc_sectors_append_ptrs(c, wp, &tmp.k, c->opts.btree_node_size);
+        bch2_alloc_sectors_append_ptrs(c, wp, &tmp.k, btree_sectors(c));
 
         bch2_open_bucket_get(c, wp, &ob);
         bch2_alloc_sectors_done(c, wp);
@@ -1029,7 +1029,7 @@ retry:
         }
 
         ret = bch2_disk_reservation_get(c, &as->disk_res,
-                        nr_nodes * c->opts.btree_node_size,
+                        nr_nodes * btree_sectors(c),
                         c->opts.metadata_replicas,
                         disk_res_flags);
         if (ret)

View File

@@ -218,7 +218,7 @@ static inline ssize_t __bch_btree_u64s_remaining(struct bch_fs *c,
 {
         ssize_t used = bset_byte_offset(b, end) / sizeof(u64) +
                 b->whiteout_u64s;
-        ssize_t total = c->opts.btree_node_size << 6;
+        ssize_t total = c->opts.btree_node_size >> 3;
 
         /* Always leave one extra u64 for bch2_varint_decode: */
         used++;

View File

@@ -1000,7 +1000,7 @@ static int bch2_mark_extent(struct btree_trans *trans,
                 ? BCH_DATA_btree
                 : BCH_DATA_user;
         s64 sectors = bkey_is_btree_ptr(k.k)
-                ? c->opts.btree_node_size
+                ? btree_sectors(c)
                 : k.k->size;
         s64 dirty_sectors = 0;
         bool stale;
@@ -1609,7 +1609,7 @@ static int bch2_trans_mark_extent(struct btree_trans *trans,
                 ? BCH_DATA_btree
                 : BCH_DATA_user;
         s64 sectors = bkey_is_btree_ptr(k.k)
-                ? c->opts.btree_node_size
+                ? btree_sectors(c)
                 : k.k->size;
         s64 dirty_sectors = 0;
         bool stale;
@@ -2184,7 +2184,7 @@ int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
         alloc_heap      alloc_heap;
 
         size_t btree_reserve    = DIV_ROUND_UP(BTREE_NODE_RESERVE,
-                             ca->mi.bucket_size / c->opts.btree_node_size);
+                             ca->mi.bucket_size / btree_sectors(c));
         /* XXX: these should be tunable */
         size_t reserve_none     = max_t(size_t, 1, nbuckets >> 9);
         size_t copygc_reserve   = max_t(size_t, 2, nbuckets >> 6);

View File

@@ -376,7 +376,7 @@ static unsigned __bio_compress(struct bch_fs *c,
         BUG_ON(!mempool_initialized(&c->compress_workspace[compression_type]));
 
         /* If it's only one block, don't bother trying to compress: */
-        if (bio_sectors(src) <= c->opts.block_size)
+        if (src->bi_iter.bi_size <= c->opts.block_size)
                 return 0;
 
         dst_data = bio_map_or_bounce(c, dst, WRITE);

View File

@@ -1037,7 +1037,7 @@ const char *bch2_bkey_ptrs_invalid(const struct bch_fs *c, struct bkey_s_c k)
 
         if (k.k->type == KEY_TYPE_btree_ptr ||
             k.k->type == KEY_TYPE_btree_ptr_v2)
-                size_ondisk = c->opts.btree_node_size;
+                size_ondisk = btree_sectors(c);
 
         bkey_extent_entry_for_each(ptrs, entry) {
                 if (__extent_entry_type(entry) >= BCH_EXTENT_ENTRY_MAX)

View File

@@ -868,8 +868,8 @@ static int bch2_fill_extent(struct bch_fs *c,
                 else
                         offset += p.crc.offset;
 
-                if ((offset & (c->opts.block_size - 1)) ||
-                    (k.k->size & (c->opts.block_size - 1)))
+                if ((offset & (block_sectors(c) - 1)) ||
+                    (k.k->size & (block_sectors(c) - 1)))
                         flags2 |= FIEMAP_EXTENT_NOT_ALIGNED;
 
                 ret = fiemap_fill_next_extent(info,
@@ -1683,7 +1683,7 @@ static int bch2_show_options(struct seq_file *seq, struct dentry *root)
                 const struct bch_option *opt = &bch2_opt_table[i];
                 u64 v = bch2_opt_get_by_id(&c->opts, i);
 
-                if (!(opt->mode & OPT_MOUNT))
+                if (!(opt->flags & OPT_MOUNT))
                         continue;
 
                 if (v == bch2_opt_get_by_id(&bch2_opts_default, i))

View File

@@ -1357,7 +1357,7 @@ void bch2_write(struct closure *cl)
         bch2_keylist_init(&op->insert_keys, op->inline_keys);
         wbio_init(bio)->put_bio = false;
 
-        if (bio_sectors(bio) & (c->opts.block_size - 1)) {
+        if (bio->bi_iter.bi_size & (c->opts.block_size - 1)) {
                 bch_err_inum_ratelimited(c, op->pos.inode,
                                          "misaligned write");
                 op->error = -EIO;
@@ -2437,7 +2437,7 @@ int bch2_fs_io_init(struct bch_fs *c)
                         BIOSET_NEED_BVECS) ||
             mempool_init_page_pool(&c->bio_bounce_pages,
                                    max_t(unsigned,
-                                         c->opts.btree_node_size,
+                                         btree_sectors(c),
                                          c->sb.encoded_extent_max) /
                                    PAGE_SECTORS, 0) ||
             rhashtable_init(&c->promote_table, &bch_promote_params))

View File

@@ -709,7 +709,7 @@ reread:
                 case JOURNAL_ENTRY_NONE:
                         if (!saw_bad)
                                 return 0;
-                        sectors = c->opts.block_size;
+                        sectors = block_sectors(c);
                         goto next_block;
                 case JOURNAL_ENTRY_BAD:
                         saw_bad = true;
@@ -718,7 +718,7 @@ reread:
                          * field of the journal entry we read, so try reading
                          * again at next block boundary:
                          */
-                        sectors = c->opts.block_size;
+                        sectors = block_sectors(c);
                         break;
                 default:
                         return ret;

View File

@@ -141,41 +141,27 @@ void bch2_opt_set_by_id(struct bch_opts *opts, enum bch_opt_id id, u64 v)
         }
 }
 
-/*
- * Initial options from superblock - here we don't want any options undefined,
- * any options the superblock doesn't specify are set to 0:
- */
-struct bch_opts bch2_opts_from_sb(struct bch_sb *sb)
-{
-        struct bch_opts opts = bch2_opts_empty();
-
-#define x(_name, _bits, _mode, _type, _sb_opt, ...)             \
-        if (_sb_opt != NO_SB_OPT)                               \
-                opt_set(opts, _name, _sb_opt(sb));
-        BCH_OPTS()
-#undef x
-
-        return opts;
-}
-
 const struct bch_option bch2_opt_table[] = {
-#define OPT_BOOL()              .type = BCH_OPT_BOOL
-#define OPT_UINT(_min, _max)    .type = BCH_OPT_UINT, .min = _min, .max = _max
-#define OPT_SECTORS(_min, _max) .type = BCH_OPT_SECTORS, .min = _min, .max = _max
-#define OPT_STR(_choices)       .type = BCH_OPT_STR, .choices = _choices
+#define OPT_BOOL()              .type = BCH_OPT_BOOL, .min = 0, .max = 2
+#define OPT_UINT(_min, _max)    .type = BCH_OPT_UINT,                   \
+                                .min = _min, .max = _max
+#define OPT_STR(_choices)       .type = BCH_OPT_STR,                    \
+                                .min = 0, .max = ARRAY_SIZE(_choices),  \
+                                .choices = _choices
 #define OPT_FN(_fn)             .type = BCH_OPT_FN,                     \
                                 .parse = _fn##_parse,                   \
                                 .to_text = _fn##_to_text
 
-#define x(_name, _bits, _mode, _type, _sb_opt, _default, _hint, _help)  \
+#define x(_name, _bits, _flags, _type, _sb_opt, _default, _hint, _help) \
         [Opt_##_name] = {                                               \
                 .attr   = {                                             \
                         .name   = #_name,                               \
-                        .mode = (_mode) & OPT_RUNTIME ? 0644 : 0444,    \
+                        .mode = (_flags) & OPT_RUNTIME ? 0644 : 0444,   \
                 },                                                      \
-                .mode           = _mode,                                \
+                .flags          = _flags,                               \
                 .hint           = _hint,                                \
                 .help           = _help,                                \
+                .get_sb         = _sb_opt,                              \
                 .set_sb         = SET_##_sb_opt,                        \
                 _type                                                   \
         },
@@ -218,7 +204,41 @@ static int bch2_mount_opt_lookup(const char *name)
         return bch2_opt_lookup(name);
 }
 
-int bch2_opt_parse(struct bch_fs *c, const struct bch_option *opt,
+static int bch2_opt_validate(const struct bch_option *opt, const char *msg, u64 v)
+{
+        if (v < opt->min) {
+                if (msg)
+                        pr_err("invalid %s%s: too small (min %llu)",
+                               msg, opt->attr.name, opt->min);
+                return -ERANGE;
+        }
+
+        if (opt->max && v >= opt->max) {
+                if (msg)
+                        pr_err("invalid %s%s: too big (max %llu)",
+                               msg, opt->attr.name, opt->max);
+                return -ERANGE;
+        }
+
+        if ((opt->flags & OPT_SB_FIELD_SECTORS) && (v & 511)) {
+                if (msg)
+                        pr_err("invalid %s %s: not a multiple of 512",
+                               msg, opt->attr.name);
+                return -EINVAL;
+        }
+
+        if ((opt->flags & OPT_MUST_BE_POW_2) && !is_power_of_2(v)) {
+                if (msg)
+                        pr_err("invalid %s%s: must be a power of two",
+                               msg, opt->attr.name);
+                return -EINVAL;
+        }
+
+        return 0;
+}
+
+int bch2_opt_parse(struct bch_fs *c, const char *msg,
+                   const struct bch_option *opt,
                    const char *val, u64 *res)
 {
         ssize_t ret;
@@ -228,30 +248,13 @@ int bch2_opt_parse(struct bch_fs *c, const struct bch_option *opt,
                 ret = kstrtou64(val, 10, res);
                 if (ret < 0)
                         return ret;
-
-                if (*res > 1)
-                        return -ERANGE;
                 break;
         case BCH_OPT_UINT:
-                ret = kstrtou64(val, 10, res);
+                ret = opt->flags & OPT_HUMAN_READABLE
+                        ? bch2_strtou64_h(val, res)
+                        : kstrtou64(val, 10, res);
                 if (ret < 0)
                         return ret;
-
-                if (*res < opt->min || *res >= opt->max)
-                        return -ERANGE;
-                break;
-        case BCH_OPT_SECTORS:
-                ret = bch2_strtou64_h(val, res);
-                if (ret < 0)
-                        return ret;
-
-                if (*res & 511)
-                        return -EINVAL;
-
-                *res >>= 9;
-
-                if (*res < opt->min || *res >= opt->max)
-                        return -ERANGE;
                 break;
         case BCH_OPT_STR:
                 ret = match_string(opt->choices, -1, val);
@@ -264,10 +267,12 @@ int bch2_opt_parse(struct bch_fs *c, const struct bch_option *opt,
                 if (!c)
                         return 0;
 
-                return opt->parse(c, val, res);
+                ret = opt->parse(c, val, res);
+                if (ret < 0)
+                        return ret;
         }
 
-        return 0;
+        return bch2_opt_validate(opt, msg, *res);
 }
 
 void bch2_opt_to_text(struct printbuf *out, struct bch_fs *c,
@@ -288,11 +293,11 @@ void bch2_opt_to_text(struct printbuf *out, struct bch_fs *c,
         switch (opt->type) {
         case BCH_OPT_BOOL:
         case BCH_OPT_UINT:
+                if (opt->flags & OPT_HUMAN_READABLE)
+                        bch2_hprint(out, v);
+                else
                         pr_buf(out, "%lli", v);
                 break;
-        case BCH_OPT_SECTORS:
-                bch2_hprint(out, v << 9);
-                break;
         case BCH_OPT_STR:
                 if (flags & OPT_SHOW_FULL_LIST)
                         bch2_string_opt_to_text(out, opt->choices, v);
@@ -365,7 +370,8 @@ int bch2_parse_mount_opts(struct bch_fs *c, struct bch_opts *opts,
                         if (id < 0)
                                 goto bad_opt;
 
-                        ret = bch2_opt_parse(c, &bch2_opt_table[id], val, &v);
+                        ret = bch2_opt_parse(c, "mount option ",
+                                             &bch2_opt_table[id], val, &v);
                         if (ret < 0)
                                 goto bad_val;
                 } else {
@@ -385,7 +391,7 @@ int bch2_parse_mount_opts(struct bch_fs *c, struct bch_opts *opts,
                                 goto no_val;
                 }
 
-                if (!(bch2_opt_table[id].mode & OPT_MOUNT))
+                if (!(bch2_opt_table[id].flags & OPT_MOUNT))
                         goto bad_opt;
 
                 if (id == Opt_acl &&
@@ -420,6 +426,65 @@ out:
         return ret;
 }
 
+/*
+ * Initial options from superblock - here we don't want any options undefined,
+ * any options the superblock doesn't specify are set to 0:
+ */
+int bch2_opts_from_sb(struct bch_opts *opts, struct bch_sb *sb)
+{
+        unsigned id;
+        int ret;
+
+        for (id = 0; id < bch2_opts_nr; id++) {
+                const struct bch_option *opt = bch2_opt_table + id;
+                u64 v;
+
+                if (opt->get_sb == NO_SB_OPT)
+                        continue;
+
+                v = opt->get_sb(sb);
+
+                if (opt->flags & OPT_SB_FIELD_ILOG2)
+                        v = 1ULL << v;
+
+                if (opt->flags & OPT_SB_FIELD_SECTORS)
+                        v <<= 9;
+
+                ret = bch2_opt_validate(opt, "superblock option ", v);
+                if (ret)
+                        return ret;
+
+                bch2_opt_set_by_id(opts, id, v);
+        }
+
+        return 0;
+}
+
+void __bch2_opt_set_sb(struct bch_sb *sb, const struct bch_option *opt, u64 v)
+{
+        if (opt->set_sb == SET_NO_SB_OPT)
+                return;
+
+        if (opt->flags & OPT_SB_FIELD_SECTORS)
+                v >>= 9;
+
+        if (opt->flags & OPT_SB_FIELD_ILOG2)
+                v = ilog2(v);
+
+        opt->set_sb(sb, v);
+}
+
+void bch2_opt_set_sb(struct bch_fs *c, const struct bch_option *opt, u64 v)
+{
+        if (opt->set_sb == SET_NO_SB_OPT)
+                return;
+
+        mutex_lock(&c->sb_lock);
+        __bch2_opt_set_sb(c->disk_sb.sb, opt, v);
+        bch2_write_super(c);
+        mutex_unlock(&c->sb_lock);
+}
+
 /* io opts: */
 
 struct bch_io_opts bch2_opts_to_inode_opts(struct bch_opts src)

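As a usage illustration of the new parse/validate path added above: the wrapper function below is not part of the patch (its name is invented for the example), but the calls it makes are the ones introduced here. A human-readable value like "4k" now parses to 4096 bytes, survives the multiple-of-512 and power-of-two checks, and is stored as 8 sectors in the superblock while staying 4096 bytes in memory; a value like "1000" would be rejected with -EINVAL.

/* Illustrative sketch only; helper name is hypothetical. */
static int example_set_block_size(struct bch_fs *c)
{
        u64 v;
        int ret = bch2_opt_parse(c, "mount option ",
                                 &bch2_opt_table[Opt_block_size], "4k", &v);
        if (ret)
                return ret;                     /* e.g. "1000" fails validation */

        bch2_opt_set_sb(c, &bch2_opt_table[Opt_block_size], v);  /* writes 8 (sectors) */
        bch2_opt_set_by_id(&c->opts, Opt_block_size, v);          /* stores 4096 (bytes) */
        return 0;
}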
View File

@@ -44,19 +44,22 @@ static inline const char *bch2_d_type_str(unsigned d_type)
 LE64_BITMASK(NO_SB_OPT,         struct bch_sb, flags[0], 0, 0);
 
 /* When can be set: */
-enum opt_mode {
+enum opt_flags {
         OPT_FS          = (1 << 0),     /* Filesystem option */
         OPT_DEVICE      = (1 << 1),     /* Device option */
         OPT_INODE       = (1 << 2),     /* Inode option */
         OPT_FORMAT      = (1 << 3),     /* May be specified at format time */
         OPT_MOUNT       = (1 << 4),     /* May be specified at mount time */
         OPT_RUNTIME     = (1 << 5),     /* May be specified at runtime */
+        OPT_HUMAN_READABLE      = (1 << 6),
+        OPT_MUST_BE_POW_2       = (1 << 7),     /* Must be power of 2 */
+        OPT_SB_FIELD_SECTORS    = (1 << 8),     /* Superblock field is >> 9 of actual value */
+        OPT_SB_FIELD_ILOG2      = (1 << 9),     /* Superblock field is ilog2 of actual value */
 };
 
 enum opt_type {
         BCH_OPT_BOOL,
         BCH_OPT_UINT,
-        BCH_OPT_SECTORS,
         BCH_OPT_STR,
         BCH_OPT_FN,
 };
@@ -88,13 +91,15 @@ enum opt_type {
 #define BCH_OPTS()                                                      \
         x(block_size,                   u16,                            \
-          OPT_FS|OPT_FORMAT,                                            \
-          OPT_SECTORS(1, 128),                                          \
+          OPT_FS|OPT_FORMAT|                                            \
+          OPT_HUMAN_READABLE|OPT_MUST_BE_POW_2|OPT_SB_FIELD_SECTORS,    \
+          OPT_UINT(512, 1U << 16),                                      \
           BCH_SB_BLOCK_SIZE,            8,                              \
           "size",       NULL)                                           \
-        x(btree_node_size,              u16,                            \
-          OPT_FS|OPT_FORMAT,                                            \
-          OPT_SECTORS(1, 512),                                          \
+        x(btree_node_size,              u32,                            \
+          OPT_FS|OPT_FORMAT|                                            \
+          OPT_HUMAN_READABLE|OPT_MUST_BE_POW_2|OPT_SB_FIELD_SECTORS,    \
+          OPT_UINT(512, 1U << 20),                                      \
           BCH_SB_BTREE_NODE_SIZE,       512,                            \
           "size",       "Btree node size, default 256k")                \
         x(errors,                       u8,                             \
@@ -198,8 +203,9 @@ enum opt_type {
           BCH_SB_GC_RESERVE,            8,                              \
           "%",  "Percentage of disk space to reserve for copygc")       \
         x(gc_reserve_bytes,             u64,                            \
-          OPT_FS|OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME,                      \
-          OPT_SECTORS(0, U64_MAX),                                      \
+          OPT_FS|OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME|                      \
+          OPT_HUMAN_READABLE|OPT_SB_FIELD_SECTORS,                      \
+          OPT_UINT(0, U64_MAX),                                         \
           BCH_SB_GC_RESERVE_BYTES,      0,                              \
           "%",  "Amount of disk space to reserve for copygc\n"          \
                 "Takes precedence over gc_reserve_percent if set")      \
@@ -360,12 +366,12 @@ enum opt_type {
                 "for performance testing purposes")                     \
         x(fs_size,                      u64,                            \
           OPT_DEVICE,                                                   \
-          OPT_SECTORS(0, S64_MAX),                                      \
+          OPT_UINT(0, S64_MAX),                                         \
           NO_SB_OPT,                    0,                              \
           "size",       "Size of filesystem on device")                 \
         x(bucket,                       u32,                            \
           OPT_DEVICE,                                                   \
-          OPT_SECTORS(0, S64_MAX),                                      \
+          OPT_UINT(0, S64_MAX),                                         \
          NO_SB_OPT,                     0,                              \
           "size",       "Size of filesystem on device")                 \
         x(durability,                   u8,                             \
@@ -424,13 +430,14 @@ struct printbuf;
 
 struct bch_option {
         struct attribute        attr;
+        u64                     (*get_sb)(const struct bch_sb *);
         void                    (*set_sb)(struct bch_sb *, u64);
-        enum opt_mode           mode;
         enum opt_type           type;
+        enum opt_flags          flags;
+        u64                     min, max;
 
         union {
         struct {
-                u64             min, max;
         };
         struct {
                 const char * const *choices;
@@ -452,10 +459,13 @@ bool bch2_opt_defined_by_id(const struct bch_opts *, enum bch_opt_id);
 u64 bch2_opt_get_by_id(const struct bch_opts *, enum bch_opt_id);
 void bch2_opt_set_by_id(struct bch_opts *, enum bch_opt_id, u64);
 
-struct bch_opts bch2_opts_from_sb(struct bch_sb *);
+int bch2_opts_from_sb(struct bch_opts *, struct bch_sb *);
+void __bch2_opt_set_sb(struct bch_sb *, const struct bch_option *, u64);
+void bch2_opt_set_sb(struct bch_fs *, const struct bch_option *, u64);
 
 int bch2_opt_lookup(const char *);
-int bch2_opt_parse(struct bch_fs *, const struct bch_option *, const char *, u64 *);
+int bch2_opt_parse(struct bch_fs *, const char *, const struct bch_option *,
+                   const char *, u64 *);
 
 #define OPT_SHOW_FULL_LIST              (1 << 0)
 #define OPT_SHOW_MOUNT_STYLE            (1 << 1)

View File

@@ -267,8 +267,7 @@ const char *bch2_sb_validate(struct bch_sb_handle *disk_sb)
 
         block_size = le16_to_cpu(sb->block_size);
 
-        if (!is_power_of_2(block_size) ||
-            block_size > PAGE_SECTORS)
+        if (block_size > PAGE_SECTORS)
                 return "Bad block size";
 
         if (bch2_is_zero(sb->user_uuid.b, sizeof(sb->user_uuid)))
@@ -310,9 +309,6 @@ const char *bch2_sb_validate(struct bch_sb_handle *disk_sb)
         if (!BCH_SB_BTREE_NODE_SIZE(sb))
                 return "Btree node size not set";
 
-        if (!is_power_of_2(BCH_SB_BTREE_NODE_SIZE(sb)))
-                return "Btree node size not a power of two";
-
         if (BCH_SB_GC_RESERVE(sb) < 5)
                 return "gc reserve percentage too small";
 
@@ -627,8 +623,12 @@ got_super:
         err = "Superblock block size smaller than device block size";
         ret = -EINVAL;
         if (le16_to_cpu(sb->sb->block_size) << 9 <
-            bdev_logical_block_size(sb->bdev))
-                goto err;
+            bdev_logical_block_size(sb->bdev)) {
+                pr_err("error reading superblock: Superblock block size (%u) smaller than device block size (%u)",
+                       le16_to_cpu(sb->sb->block_size) << 9,
+                       bdev_logical_block_size(sb->bdev));
+                goto err_no_print;
+        }
 
         ret = 0;
         sb->have_layout = true;
@@ -636,8 +636,9 @@ out:
         pr_verbose_init(*opts, "ret %i", ret);
         return ret;
 err:
-        bch2_free_super(sb);
         pr_err("error reading superblock: %s", err);
+err_no_print:
+        bch2_free_super(sb);
         goto out;
 }

View File

@@ -762,10 +762,13 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
                 SET_BCH_SB_JOURNAL_RECLAIM_DELAY(sb, 100);
 
         c->opts = bch2_opts_default;
-        bch2_opts_apply(&c->opts, bch2_opts_from_sb(sb));
+        ret = bch2_opts_from_sb(&c->opts, sb);
+        if (ret)
+                goto err;
+
         bch2_opts_apply(&c->opts, opts);
 
-        c->block_bits           = ilog2(c->opts.block_size);
+        c->block_bits           = ilog2(block_sectors(c));
         c->btree_foreground_merge_threshold = BTREE_FOREGROUND_MERGE_THRESHOLD(c);
 
         if (bch2_fs_init_fault("fs_alloc")) {
@@ -877,7 +880,7 @@ static void print_mount_opts(struct bch_fs *c)
                 const struct bch_option *opt = &bch2_opt_table[i];
                 u64 v = bch2_opt_get_by_id(&c->opts, i);
 
-                if (!(opt->mode & OPT_MOUNT))
+                if (!(opt->flags & OPT_MOUNT))
                         continue;
 
                 if (v == bch2_opt_get_by_id(&bch2_opts_default, i))
@@ -1003,7 +1006,7 @@ static const char *bch2_dev_may_add(struct bch_sb *sb, struct bch_fs *c)
         if (!sb_mi)
                 return "Invalid superblock: member info area missing";
 
-        if (le16_to_cpu(sb->block_size) != c->opts.block_size)
+        if (le16_to_cpu(sb->block_size) != block_sectors(c))
                 return "mismatched block size";
 
         if (le16_to_cpu(sb_mi->members[sb->dev_idx].bucket_size) <

View File

@@ -626,7 +626,7 @@ STORE(bch2_fs_opts_dir)
         if (!tmp)
                 return -ENOMEM;
 
-        ret = bch2_opt_parse(c, opt, strim(tmp), &v);
+        ret = bch2_opt_parse(c, NULL, opt, strim(tmp), &v);
         kfree(tmp);
 
         if (ret < 0)
@@ -636,13 +636,7 @@ STORE(bch2_fs_opts_dir)
         if (ret < 0)
                 return ret;
 
-        if (opt->set_sb != SET_NO_SB_OPT) {
-                mutex_lock(&c->sb_lock);
-                opt->set_sb(c->disk_sb.sb, v);
-                bch2_write_super(c);
-                mutex_unlock(&c->sb_lock);
-        }
-
+        bch2_opt_set_sb(c, opt, v);
         bch2_opt_set_by_id(&c->opts, id, v);
 
         if ((id == Opt_background_target ||
@@ -665,7 +659,7 @@ int bch2_opts_create_sysfs_files(struct kobject *kobj)
         for (i = bch2_opt_table;
              i < bch2_opt_table + bch2_opts_nr;
              i++) {
-                if (!(i->mode & OPT_FS))
+                if (!(i->flags & OPT_FS))
                         continue;
 
                 ret = sysfs_create_file(kobj, &i->attr);

View File

@@ -525,7 +525,7 @@ static int bch2_xattr_bcachefs_set(const struct xattr_handler *handler,
         memcpy(buf, value, size);
         buf[size] = '\0';
 
-        ret = bch2_opt_parse(c, opt, buf, &v);
+        ret = bch2_opt_parse(c, NULL, opt, buf, &v);
         kfree(buf);
 
         if (ret < 0)