2017-03-16 23:18:50 -07:00
|
|
|
// SPDX-License-Identifier: GPL-2.0
|
|
|
|
/*
|
|
|
|
* bcachefs setup/teardown code, and some metadata io - read a superblock and
|
|
|
|
* figure out what to do with it.
|
|
|
|
*
|
|
|
|
* Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
|
|
|
|
* Copyright 2012 Google, Inc.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include "bcachefs.h"
|
2018-10-05 21:46:55 -07:00
|
|
|
#include "alloc_background.h"
|
|
|
|
#include "alloc_foreground.h"
|
2018-11-27 16:30:56 -07:00
|
|
|
#include "bkey_sort.h"
|
2017-03-16 23:18:50 -07:00
|
|
|
#include "btree_cache.h"
|
|
|
|
#include "btree_gc.h"
|
2023-08-05 13:08:44 -07:00
|
|
|
#include "btree_journal_iter.h"
|
2019-03-07 17:46:10 -07:00
|
|
|
#include "btree_key_cache.h"
|
2024-03-11 20:11:46 -07:00
|
|
|
#include "btree_node_scan.h"
|
2017-03-16 23:18:50 -07:00
|
|
|
#include "btree_update_interior.h"
|
|
|
|
#include "btree_io.h"
|
bcachefs: Btree write buffer
This adds a new method of doing btree updates - a straight write buffer,
implemented as a flat fixed size array.
This is only useful when we don't need to read from the btree in order
to do the update, and when reading is infrequent - perfect for the LRU
btree.
This will make LRU btree updates fast enough that we'll be able to use
it for persistently indexing buckets by fragmentation, which will be a
massive boost to copygc performance.
Changes:
- A new btree_insert_type enum, for btree_insert_entries. Specifies
btree, btree key cache, or btree write buffer.
- bch2_trans_update_buffered(): updates via the btree write buffer
don't need a btree path, so we need a new update path.
- Transaction commit path changes:
The update to the btree write buffer both mutates global, and can
fail if there isn't currently room. Therefore we do all write buffer
updates in the transaction all at once, and also if it fails we have
to revert filesystem usage counter changes.
If there isn't room we flush the write buffer in the transaction
commit error path and retry.
- A new persistent option, for specifying the number of entries in the
write buffer.
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
2023-01-03 22:00:50 -07:00
|
|
|
#include "btree_write_buffer.h"
|
2022-01-04 20:32:09 -07:00
|
|
|
#include "buckets_waiting_for_journal.h"
|
2017-03-16 23:18:50 -07:00
|
|
|
#include "chardev.h"
|
|
|
|
#include "checksum.h"
|
|
|
|
#include "clock.h"
|
|
|
|
#include "compress.h"
|
|
|
|
#include "debug.h"
|
2023-11-09 12:22:46 -07:00
|
|
|
#include "disk_accounting.h"
|
2017-03-16 23:18:50 -07:00
|
|
|
#include "disk_groups.h"
|
2018-11-01 12:13:19 -07:00
|
|
|
#include "ec.h"
|
2022-07-18 16:42:58 -07:00
|
|
|
#include "errcode.h"
|
2017-03-16 23:18:50 -07:00
|
|
|
#include "error.h"
|
|
|
|
#include "fs.h"
|
|
|
|
#include "fs-io.h"
|
2023-08-03 15:18:21 -07:00
|
|
|
#include "fs-io-buffered.h"
|
|
|
|
#include "fs-io-direct.h"
|
2017-03-16 23:18:50 -07:00
|
|
|
#include "fsck.h"
|
|
|
|
#include "inode.h"
|
2023-09-10 15:05:17 -07:00
|
|
|
#include "io_read.h"
|
|
|
|
#include "io_write.h"
|
2017-03-16 23:18:50 -07:00
|
|
|
#include "journal.h"
|
|
|
|
#include "journal_reclaim.h"
|
2019-04-04 18:53:12 -07:00
|
|
|
#include "journal_seq_blacklist.h"
|
2017-03-16 23:18:50 -07:00
|
|
|
#include "move.h"
|
|
|
|
#include "migrate.h"
|
|
|
|
#include "movinggc.h"
|
2022-12-14 18:52:11 -07:00
|
|
|
#include "nocow_locking.h"
|
2017-03-16 23:18:50 -07:00
|
|
|
#include "quota.h"
|
|
|
|
#include "rebalance.h"
|
|
|
|
#include "recovery.h"
|
|
|
|
#include "replicas.h"
|
2023-08-05 12:54:38 -07:00
|
|
|
#include "sb-clean.h"
|
2024-01-20 21:46:35 -07:00
|
|
|
#include "sb-counters.h"
|
2023-10-25 12:51:16 -07:00
|
|
|
#include "sb-errors.h"
|
2023-09-24 20:55:37 -07:00
|
|
|
#include "sb-members.h"
|
2023-08-16 13:54:33 -07:00
|
|
|
#include "snapshot.h"
|
2021-03-15 21:42:25 -07:00
|
|
|
#include "subvolume.h"
|
2017-03-16 23:18:50 -07:00
|
|
|
#include "super.h"
|
|
|
|
#include "super-io.h"
|
|
|
|
#include "sysfs.h"
|
2024-02-04 20:20:40 -07:00
|
|
|
#include "thread_with_file.h"
|
2017-03-16 23:18:50 -07:00
|
|
|
#include "trace.h"
|
|
|
|
|
|
|
|
#include <linux/backing-dev.h>
|
|
|
|
#include <linux/blkdev.h>
|
|
|
|
#include <linux/debugfs.h>
|
|
|
|
#include <linux/device.h>
|
|
|
|
#include <linux/idr.h>
|
|
|
|
#include <linux/module.h>
|
|
|
|
#include <linux/percpu.h>
|
|
|
|
#include <linux/random.h>
|
|
|
|
#include <linux/sysfs.h>
|
|
|
|
#include <crypto/hash.h>
|
|
|
|
|
|
|
|
MODULE_LICENSE("GPL");
|
|
|
|
MODULE_AUTHOR("Kent Overstreet <kent.overstreet@gmail.com>");
|
2023-09-13 07:14:30 -07:00
|
|
|
MODULE_DESCRIPTION("bcachefs filesystem");
|
2023-12-04 23:10:28 -07:00
|
|
|
MODULE_SOFTDEP("pre: crc32c");
|
|
|
|
MODULE_SOFTDEP("pre: crc64");
|
|
|
|
MODULE_SOFTDEP("pre: sha256");
|
|
|
|
MODULE_SOFTDEP("pre: chacha20");
|
|
|
|
MODULE_SOFTDEP("pre: poly1305");
|
|
|
|
MODULE_SOFTDEP("pre: xxhash");
|
2017-03-16 23:18:50 -07:00
|
|
|
|
2023-11-26 15:05:02 -07:00
|
|
|
/*
 * Human-readable names for the BCH_FS_* flag bits, generated from the
 * BCH_FS_FLAGS() x-macro so the table stays in sync with the enum.
 * NULL-terminated.
 */
const char * const bch2_fs_flag_strs[] = {
#define x(n)		#n,
	BCH_FS_FLAGS()
#undef x
	NULL
};
|
|
|
|
|
2024-05-29 19:06:00 -07:00
|
|
|
/*
 * Print @str on behalf of filesystem @c.
 *
 * In-kernel builds first check for a stdio redirect (set up when output is
 * being forwarded to a userspace thread via thread_with_file); if one is
 * attached, the string goes there instead of the kernel log.  Otherwise the
 * string is emitted line-by-line at KERN_ERR, which keeps long multi-line
 * messages intact in the printk buffer.
 */
void bch2_print_str(struct bch_fs *c, const char *str)
{
#ifdef __KERNEL__
	struct stdio_redirect *stdio = bch2_fs_stdio_redirect(c);

	if (unlikely(stdio)) {
		bch2_stdio_redirect_printf(stdio, true, "%s", str);
		return;
	}
#endif
	bch2_print_string_as_lines(KERN_ERR, str);
}
|
|
|
|
|
2024-03-17 18:51:19 -07:00
|
|
|
/*
 * Common backend for bch2_print_opts()/__bch2_print(): format @fmt/@args to
 * @stdio if a redirect is attached, else to the kernel log via vprintk().
 *
 * @stdio may be NULL (no redirect).  @args is consumed; callers own
 * va_start/va_end.
 */
__printf(2, 0)
static void bch2_print_maybe_redirect(struct stdio_redirect *stdio, const char *fmt, va_list args)
{
#ifdef __KERNEL__
	if (unlikely(stdio)) {
		/*
		 * Strip a leading printk level prefix (KERN_SOH + level
		 * digit, 2 bytes) - it's meaningless for redirected output.
		 */
		if (fmt[0] == KERN_SOH[0])
			fmt += 2;

		bch2_stdio_redirect_vprintf(stdio, true, fmt, args);
		return;
	}
#endif
	vprintk(fmt, args);
}
|
|
|
|
|
|
|
|
void bch2_print_opts(struct bch_opts *opts, const char *fmt, ...)
|
|
|
|
{
|
|
|
|
struct stdio_redirect *stdio = (void *)(unsigned long)opts->stdio;
|
|
|
|
|
|
|
|
va_list args;
|
|
|
|
va_start(args, fmt);
|
|
|
|
bch2_print_maybe_redirect(stdio, fmt, args);
|
2024-02-12 15:15:29 -07:00
|
|
|
va_end(args);
|
|
|
|
}
|
|
|
|
|
2023-12-04 18:15:23 -07:00
|
|
|
void __bch2_print(struct bch_fs *c, const char *fmt, ...)
|
|
|
|
{
|
2023-12-31 08:04:54 -07:00
|
|
|
struct stdio_redirect *stdio = bch2_fs_stdio_redirect(c);
|
2023-12-04 18:15:23 -07:00
|
|
|
|
2023-12-31 08:04:54 -07:00
|
|
|
va_list args;
|
2023-12-04 18:15:23 -07:00
|
|
|
va_start(args, fmt);
|
2024-03-16 16:36:11 -07:00
|
|
|
bch2_print_maybe_redirect(stdio, fmt, args);
|
2023-12-04 18:15:23 -07:00
|
|
|
va_end(args);
|
|
|
|
}
|
|
|
|
|
2017-03-16 23:18:50 -07:00
|
|
|
/*
 * KTYPE(type) generates the sysfs boilerplate for one kobject type:
 * an attribute_group wrapping type##_files, a NULL-terminated groups
 * array, and a kobj_type wiring up type##_release and type##_sysfs_ops.
 */
#define KTYPE(type)							\
static const struct attribute_group type ## _group = {			\
	.attrs = type ## _files						\
};									\
									\
static const struct attribute_group *type ## _groups[] = {		\
	&type ## _group,						\
	NULL								\
};									\
									\
static const struct kobj_type type ## _ktype = {			\
	.release	= type ## _release,				\
	.sysfs_ops	= &type ## _sysfs_ops,				\
	.default_groups = type ## _groups				\
}

static void bch2_fs_release(struct kobject *);
static void bch2_dev_release(struct kobject *);

/*
 * The following release hooks are intentionally empty: these kobjects are
 * embedded in struct bch_fs, so their storage is freed when the containing
 * filesystem object is released (see bch2_fs_release()).
 */
static void bch2_fs_counters_release(struct kobject *k)
{
}

static void bch2_fs_internal_release(struct kobject *k)
{
}

static void bch2_fs_opts_dir_release(struct kobject *k)
{
}

static void bch2_fs_time_stats_release(struct kobject *k)
{
}

KTYPE(bch2_fs);
KTYPE(bch2_fs_counters);
KTYPE(bch2_fs_internal);
KTYPE(bch2_fs_opts_dir);
KTYPE(bch2_fs_time_stats);
KTYPE(bch2_dev);

/* Global registry of open filesystems, protected by bch_fs_list_lock: */
static struct kset *bcachefs_kset;
static LIST_HEAD(bch_fs_list);
static DEFINE_MUTEX(bch_fs_list_lock);

/* Woken when writes are fully disabled, or on emergency read-only: */
DECLARE_WAIT_QUEUE_HEAD(bch2_read_only_wait);

static void bch2_dev_unlink(struct bch_dev *);
static void bch2_dev_free(struct bch_dev *);
static int bch2_dev_alloc(struct bch_fs *, unsigned);
static int bch2_dev_sysfs_online(struct bch_fs *, struct bch_dev *);
static void __bch2_dev_read_only(struct bch_fs *, struct bch_dev *);
|
|
|
|
|
|
|
|
/*
 * Look up the filesystem containing the member device with block device
 * number @dev.
 *
 * Returns the bch_fs with a closure reference held (caller must drop it),
 * or NULL if no open filesystem contains that device.
 *
 * bch_fs_list_lock stabilizes the filesystem list; the RCU read lock covers
 * the per-filesystem member device iteration.
 */
struct bch_fs *bch2_dev_to_fs(dev_t dev)
{
	struct bch_fs *c;

	mutex_lock(&bch_fs_list_lock);
	rcu_read_lock();

	list_for_each_entry(c, &bch_fs_list, list)
		for_each_member_device_rcu(c, ca, NULL)
			if (ca->disk_sb.bdev && ca->disk_sb.bdev->bd_dev == dev) {
				/* take a ref before dropping the locks */
				closure_get(&c->cl);
				goto found;
			}
	c = NULL;
found:
	rcu_read_unlock();
	mutex_unlock(&bch_fs_list_lock);

	return c;
}
|
|
|
|
|
|
|
|
/*
 * Scan the open-filesystems list for one whose superblock UUID matches
 * @uuid.  Returns the bch_fs without taking a reference, or NULL.
 * Caller must hold bch_fs_list_lock.
 */
static struct bch_fs *__bch2_uuid_to_fs(__uuid_t uuid)
{
	lockdep_assert_held(&bch_fs_list_lock);

	struct bch_fs *c;
	list_for_each_entry(c, &bch_fs_list, list)
		if (!memcmp(&c->disk_sb.sb->uuid, &uuid, sizeof(uuid)))
			return c;

	return NULL;
}
|
|
|
|
|
|
|
|
/*
 * Locked wrapper around __bch2_uuid_to_fs(): look up an open filesystem by
 * UUID and, if found, return it with a closure reference held (caller must
 * drop it).
 */
struct bch_fs *bch2_uuid_to_fs(__uuid_t uuid)
{
	mutex_lock(&bch_fs_list_lock);

	struct bch_fs *c = __bch2_uuid_to_fs(uuid);
	if (c)
		closure_get(&c->cl);

	mutex_unlock(&bch_fs_list_lock);
	return c;
}
|
|
|
|
|
|
|
|
/* Filesystem RO/RW: */
|
|
|
|
|
|
|
|
/*
|
|
|
|
* For startup/shutdown of RW stuff, the dependencies are:
|
|
|
|
*
|
|
|
|
* - foreground writes depend on copygc and rebalance (to free up space)
|
|
|
|
*
|
|
|
|
* - copygc and rebalance depend on mark and sweep gc (they actually probably
|
|
|
|
* don't because they either reserve ahead of time or don't block if
|
|
|
|
* allocations fail, but allocations can require mark and sweep gc to run
|
|
|
|
* because of generation number wraparound)
|
|
|
|
*
|
|
|
|
* - all of the above depends on the allocator threads
|
|
|
|
*
|
|
|
|
* - allocator depends on the journal (when it rewrites prios and gens)
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
 * Core of the read-only transition: stop all background work, flush
 * everything dirty to the journal/btree, then shut the journal down.
 *
 * Caller is responsible for the BCH_FS_* flag handling and for blocking
 * new foreground writes first (see bch2_fs_read_only()).
 */
static void __bch2_fs_read_only(struct bch_fs *c)
{
	unsigned clean_passes = 0;
	u64 seq = 0;

	/* Stop everything that generates new writes: */
	bch2_fs_ec_stop(c);
	bch2_open_buckets_stop(c, NULL, true);
	bch2_rebalance_stop(c);
	bch2_copygc_stop(c);
	bch2_fs_ec_flush(c);

	bch_verbose(c, "flushing journal and stopping allocators, journal seq %llu",
		    journal_cur_seq(&c->journal));

	/*
	 * Flushing one thing can generate more work for another, so loop
	 * until two consecutive passes find nothing left to flush and the
	 * journal sequence number has stopped moving:
	 */
	do {
		clean_passes++;

		if (bch2_btree_interior_updates_flush(c) ||
		    bch2_btree_write_buffer_flush_going_ro(c) ||
		    bch2_journal_flush_all_pins(&c->journal) ||
		    bch2_btree_flush_all_writes(c) ||
		    seq != atomic64_read(&c->journal.seq)) {
			seq = atomic64_read(&c->journal.seq);
			clean_passes = 0;
		}
	} while (clean_passes < 2);

	bch_verbose(c, "flushing journal and stopping allocators complete, journal seq %llu",
		    journal_cur_seq(&c->journal));

	/*
	 * Only mark the shutdown clean if journal replay finished and we're
	 * not going down due to an emergency:
	 */
	if (test_bit(JOURNAL_replay_done, &c->journal.flags) &&
	    !test_bit(BCH_FS_emergency_ro, &c->flags))
		set_bit(BCH_FS_clean_shutdown, &c->flags);

	bch2_fs_journal_stop(&c->journal);

	bch_info(c, "%sshutdown complete, journal seq %llu",
		 test_bit(BCH_FS_clean_shutdown, &c->flags) ? "" : "un",
		 c->journal.seq_ondisk);

	/*
	 * After stopping journal:
	 */
	for_each_member_device(c, ca)
		bch2_dev_allocator_remove(c, ca);
}
|
|
|
|
|
2023-02-09 10:21:45 -07:00
|
|
|
#ifndef BCH_WRITE_REF_DEBUG
/*
 * percpu_ref release callback for c->writes: fires once the last write
 * reference is dropped after percpu_ref_kill(), signalling
 * bch2_fs_read_only() that all in-flight writes have completed.
 */
static void bch2_writes_disabled(struct percpu_ref *writes)
{
	struct bch_fs *c = container_of(writes, struct bch_fs, writes);

	set_bit(BCH_FS_write_disable_complete, &c->flags);
	wake_up(&bch2_read_only_wait);
}
#endif
|
2017-03-16 23:18:50 -07:00
|
|
|
|
|
|
|
/*
 * Transition the filesystem to read-only: block new writes, wait for
 * outstanding ones, flush everything, and - if the shutdown was clean -
 * write a clean superblock.
 *
 * Safe to call when already read-only (just stops journal reclaim).
 * Caller holds c->state_lock for write (see bch2_fs_read_only_work()).
 */
void bch2_fs_read_only(struct bch_fs *c)
{
	if (!test_bit(BCH_FS_rw, &c->flags)) {
		bch2_journal_reclaim_stop(&c->journal);
		return;
	}

	BUG_ON(test_bit(BCH_FS_write_disable_complete, &c->flags));

	bch_verbose(c, "going read-only");

	/*
	 * Block new foreground-end write operations from starting - any new
	 * writes will return -EROFS:
	 */
	set_bit(BCH_FS_going_ro, &c->flags);
#ifndef BCH_WRITE_REF_DEBUG
	percpu_ref_kill(&c->writes);
#else
	/* debug build: drop each individually-tracked write ref */
	for (unsigned i = 0; i < BCH_WRITE_REF_NR; i++)
		bch2_write_ref_put(c, i);
#endif

	/*
	 * If we're not doing an emergency shutdown, we want to wait on
	 * outstanding writes to complete so they don't see spurious errors due
	 * to shutting down the allocator:
	 *
	 * If we are doing an emergency shutdown outstanding writes may
	 * hang until we shutdown the allocator so we don't want to wait
	 * on outstanding writes before shutting everything down - but
	 * we do need to wait on them before returning and signalling
	 * that going RO is complete:
	 */
	wait_event(bch2_read_only_wait,
		   test_bit(BCH_FS_write_disable_complete, &c->flags) ||
		   test_bit(BCH_FS_emergency_ro, &c->flags));

	bool writes_disabled = test_bit(BCH_FS_write_disable_complete, &c->flags);
	if (writes_disabled)
		bch_verbose(c, "finished waiting for writes to stop");

	__bch2_fs_read_only(c);

	/* emergency path: now wait for writes to actually finish */
	wait_event(bch2_read_only_wait,
		   test_bit(BCH_FS_write_disable_complete, &c->flags));

	if (!writes_disabled)
		bch_verbose(c, "finished waiting for writes to stop");

	clear_bit(BCH_FS_write_disable_complete, &c->flags);
	clear_bit(BCH_FS_going_ro, &c->flags);
	clear_bit(BCH_FS_rw, &c->flags);

	/*
	 * If nothing went wrong and the shutdown was clean, verify that
	 * nothing dirty remains and persist the clean state:
	 */
	if (!bch2_journal_error(&c->journal) &&
	    !test_bit(BCH_FS_error, &c->flags) &&
	    !test_bit(BCH_FS_emergency_ro, &c->flags) &&
	    test_bit(BCH_FS_started, &c->flags) &&
	    test_bit(BCH_FS_clean_shutdown, &c->flags) &&
	    c->recovery_pass_done >= BCH_RECOVERY_PASS_journal_replay) {
		BUG_ON(c->journal.last_empty_seq != journal_cur_seq(&c->journal));
		BUG_ON(atomic_long_read(&c->btree_cache.nr_dirty));
		BUG_ON(atomic_long_read(&c->btree_key_cache.nr_dirty));
		BUG_ON(c->btree_write_buffer.inc.keys.nr);
		BUG_ON(c->btree_write_buffer.flushing.keys.nr);
		bch2_verify_accounting_clean(c);

		bch_verbose(c, "marking filesystem clean");
		bch2_fs_mark_clean(c);
	} else {
		bch_verbose(c, "done going read-only, filesystem not clean");
	}
}
|
|
|
|
|
|
|
|
static void bch2_fs_read_only_work(struct work_struct *work)
|
|
|
|
{
|
|
|
|
struct bch_fs *c =
|
|
|
|
container_of(work, struct bch_fs, read_only_work);
|
|
|
|
|
2020-06-15 11:58:47 -07:00
|
|
|
down_write(&c->state_lock);
|
2017-03-16 23:18:50 -07:00
|
|
|
bch2_fs_read_only(c);
|
2020-06-15 11:58:47 -07:00
|
|
|
up_write(&c->state_lock);
|
2017-03-16 23:18:50 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Kick off a read-only transition without blocking: queues
 * bch2_fs_read_only_work() on the long-running system workqueue, for
 * callers that can't take state_lock or sleep (e.g. emergency shutdown).
 */
static void bch2_fs_read_only_async(struct bch_fs *c)
{
	queue_work(system_long_wq, &c->read_only_work);
}
|
|
|
|
|
|
|
|
/*
 * Force the filesystem read-only immediately: halt the journal (no further
 * writes reach disk) and schedule the full read-only transition.
 *
 * Returns true if this call was the one that set BCH_FS_emergency_ro,
 * false if an emergency shutdown was already in progress.
 */
bool bch2_fs_emergency_read_only(struct bch_fs *c)
{
	bool ret = !test_and_set_bit(BCH_FS_emergency_ro, &c->flags);

	bch2_journal_halt(&c->journal);
	bch2_fs_read_only_async(c);

	/* unblock bch2_fs_read_only() waiters - see the emergency path there */
	wake_up(&bch2_read_only_wait);
	return ret;
}
|
|
|
|
|
2019-03-21 19:19:57 -07:00
|
|
|
/*
 * Late part of the read-write transition: start the background data-move
 * threads.  Returns 0 on success or a negative error code.
 */
static int bch2_fs_read_write_late(struct bch_fs *c)
{
	/*
	 * Data move operations can't run until after check_snapshots has
	 * completed, and bch2_snapshot_is_ancestor() is available.
	 *
	 * Ideally we'd start copygc/rebalance earlier instead of waiting for
	 * all of recovery/fsck to complete:
	 */
	int ret = bch2_copygc_start(c);
	if (ret) {
		bch_err(c, "error starting copygc thread");
		return ret;
	}

	ret = bch2_rebalance_start(c);
	if (ret)
		bch_err(c, "error starting rebalance thread");

	return ret;
}
|
2017-03-16 23:18:50 -07:00
|
|
|
|
2019-12-26 12:54:43 -07:00
|
|
|
/*
 * Transition the filesystem to read-write.
 *
 * @early: true during recovery, before fsck has finished - skips starting
 * the data-move threads (done later via bch2_fs_read_write_late()).
 *
 * On any failure after going partially rw, the error path rolls everything
 * back to read-only.  Returns 0 or a negative bcachefs error code.
 */
static int __bch2_fs_read_write(struct bch_fs *c, bool early)
{
	int ret;

	if (test_bit(BCH_FS_initial_gc_unfixed, &c->flags)) {
		bch_err(c, "cannot go rw, unfixed btree errors");
		return -BCH_ERR_erofs_unfixed_errors;
	}

	/* already read-write - nothing to do */
	if (test_bit(BCH_FS_rw, &c->flags))
		return 0;

	bch_info(c, "going read-write");

	ret = bch2_sb_members_v2_init(c);
	if (ret)
		goto err;

	/* persist "dirty" in the superblock before any writes happen */
	ret = bch2_fs_mark_dirty(c);
	if (ret)
		goto err;

	clear_bit(BCH_FS_clean_shutdown, &c->flags);

	/*
	 * First journal write must be a flush write: after a clean shutdown we
	 * don't read the journal, so the first journal write may end up
	 * overwriting whatever was there previously, and there must always be
	 * at least one non-flush write in the journal or recovery will fail:
	 */
	set_bit(JOURNAL_need_flush_write, &c->journal.flags);
	set_bit(JOURNAL_running, &c->journal.flags);

	for_each_rw_member(c, ca)
		bch2_dev_allocator_add(c, ca);
	bch2_recalc_capacity(c);

	set_bit(BCH_FS_rw, &c->flags);
	set_bit(BCH_FS_was_rw, &c->flags);

	/* re-arm the write reference(s) killed when we went read-only */
#ifndef BCH_WRITE_REF_DEBUG
	percpu_ref_reinit(&c->writes);
#else
	for (unsigned i = 0; i < BCH_WRITE_REF_NR; i++) {
		BUG_ON(atomic_long_read(&c->writes[i]));
		atomic_long_inc(&c->writes[i]);
	}
#endif

	ret = bch2_journal_reclaim_start(&c->journal);
	if (ret)
		goto err;

	if (!early) {
		ret = bch2_fs_read_write_late(c);
		if (ret)
			goto err;
	}

	/* kick off deferred background work now that we can write: */
	bch2_do_discards(c);
	bch2_do_invalidates(c);
	bch2_do_stripe_deletes(c);
	bch2_do_pending_node_rewrites(c);
	return 0;
err:
	/*
	 * If BCH_FS_rw got set, undo via the full read-only path; otherwise
	 * only the low-level teardown is needed:
	 */
	if (test_bit(BCH_FS_rw, &c->flags))
		bch2_fs_read_only(c);
	else
		__bch2_fs_read_only(c);
	return ret;
}
|
|
|
|
|
|
|
|
/*
 * Public read-write transition: refuses if mount options forbid writes
 * (recovery stopped before journal replay, or -o nochanges), otherwise
 * does the full (non-early) rw transition.
 */
int bch2_fs_read_write(struct bch_fs *c)
{
	if (c->opts.recovery_pass_last &&
	    c->opts.recovery_pass_last < BCH_RECOVERY_PASS_journal_replay)
		return -BCH_ERR_erofs_norecovery;

	if (c->opts.nochanges)
		return -BCH_ERR_erofs_nochanges;

	return __bch2_fs_read_write(c, false);
}
|
|
|
|
|
|
|
|
/*
 * Read-write transition used during recovery, before fsck completes:
 * skips starting copygc/rebalance.  Caller must hold state_lock.
 */
int bch2_fs_read_write_early(struct bch_fs *c)
{
	lockdep_assert_held(&c->state_lock);

	return __bch2_fs_read_write(c, true);
}
|
|
|
|
|
|
|
|
/* Filesystem startup/shutdown: */
|
|
|
|
|
2020-09-08 15:30:32 -07:00
|
|
|
static void __bch2_fs_free(struct bch_fs *c)
|
2017-03-16 23:18:50 -07:00
|
|
|
{
|
2024-04-05 21:07:46 -07:00
|
|
|
for (unsigned i = 0; i < BCH_TIME_STAT_NR; i++)
|
2017-03-16 23:18:50 -07:00
|
|
|
bch2_time_stats_exit(&c->times[i]);
|
|
|
|
|
2024-03-11 20:11:46 -07:00
|
|
|
bch2_find_btree_nodes_exit(&c->found_btree_nodes);
|
2023-02-11 10:57:04 -07:00
|
|
|
bch2_free_pending_node_rewrites(c);
|
2023-11-09 12:22:46 -07:00
|
|
|
bch2_fs_accounting_exit(c);
|
2023-10-25 12:51:16 -07:00
|
|
|
bch2_fs_sb_errors_exit(c);
|
2022-03-15 01:36:33 -07:00
|
|
|
bch2_fs_counters_exit(c);
|
2021-03-15 21:42:25 -07:00
|
|
|
bch2_fs_snapshots_exit(c);
|
2017-03-16 23:18:50 -07:00
|
|
|
bch2_fs_quota_exit(c);
|
2023-08-03 15:18:21 -07:00
|
|
|
bch2_fs_fs_io_direct_exit(c);
|
|
|
|
bch2_fs_fs_io_buffered_exit(c);
|
2017-03-16 23:18:50 -07:00
|
|
|
bch2_fs_fsio_exit(c);
|
2024-06-08 18:41:01 -07:00
|
|
|
bch2_fs_vfs_exit(c);
|
2018-11-01 12:13:19 -07:00
|
|
|
bch2_fs_ec_exit(c);
|
2017-03-16 23:18:50 -07:00
|
|
|
bch2_fs_encryption_exit(c);
|
2023-09-24 13:25:06 -07:00
|
|
|
bch2_fs_nocow_locking_exit(c);
|
2023-09-10 15:05:17 -07:00
|
|
|
bch2_fs_io_write_exit(c);
|
|
|
|
bch2_fs_io_read_exit(c);
|
2022-01-04 20:32:09 -07:00
|
|
|
bch2_fs_buckets_waiting_for_journal_exit(c);
|
2020-05-25 17:35:53 -07:00
|
|
|
bch2_fs_btree_interior_update_exit(c);
|
2019-03-07 17:46:10 -07:00
|
|
|
bch2_fs_btree_key_cache_exit(&c->btree_key_cache);
|
2017-03-16 23:18:50 -07:00
|
|
|
bch2_fs_btree_cache_exit(c);
|
2024-05-22 16:53:03 -07:00
|
|
|
bch2_fs_btree_iter_exit(c);
|
2021-04-23 21:24:25 -07:00
|
|
|
bch2_fs_replicas_exit(c);
|
2017-03-16 23:18:50 -07:00
|
|
|
bch2_fs_journal_exit(&c->journal);
|
|
|
|
bch2_io_clock_exit(&c->io_clock[WRITE]);
|
|
|
|
bch2_io_clock_exit(&c->io_clock[READ]);
|
|
|
|
bch2_fs_compress_exit(c);
|
2023-11-17 21:13:49 -07:00
|
|
|
bch2_journal_keys_put_initial(c);
|
2024-03-11 20:11:46 -07:00
|
|
|
bch2_find_btree_nodes_exit(&c->found_btree_nodes);
|
2023-11-17 21:13:49 -07:00
|
|
|
BUG_ON(atomic_read(&c->journal_keys.ref));
|
bcachefs: Btree write buffer
This adds a new method of doing btree updates - a straight write buffer,
implemented as a flat fixed size array.
This is only useful when we don't need to read from the btree in order
to do the update, and when reading is infrequent - perfect for the LRU
btree.
This will make LRU btree updates fast enough that we'll be able to use
it for persistently indexing buckets by fragmentation, which will be a
massive boost to copygc performance.
Changes:
- A new btree_insert_type enum, for btree_insert_entries. Specifies
btree, btree key cache, or btree write buffer.
- bch2_trans_update_buffered(): updates via the btree write buffer
don't need a btree path, so we need a new update path.
- Transaction commit path changes:
The update to the btree write buffer both mutates global, and can
fail if there isn't currently room. Therefore we do all write buffer
updates in the transaction all at once, and also if it fails we have
to revert filesystem usage counter changes.
If there isn't room we flush the write buffer in the transaction
commit error path and retry.
- A new persistent option, for specifying the number of entries in the
write buffer.
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
2023-01-03 22:00:50 -07:00
|
|
|
bch2_fs_btree_write_buffer_exit(c);
|
2018-11-25 22:13:33 -07:00
|
|
|
percpu_free_rwsem(&c->mark_lock);
|
2024-06-28 08:06:31 -07:00
|
|
|
if (c->online_reserved) {
|
|
|
|
u64 v = percpu_u64_get(c->online_reserved);
|
|
|
|
WARN(v, "online_reserved not 0 at shutdown: %lli", v);
|
|
|
|
free_percpu(c->online_reserved);
|
|
|
|
}
|
2020-11-05 18:02:01 -07:00
|
|
|
|
2023-06-28 19:09:13 -07:00
|
|
|
darray_exit(&c->btree_roots_extra);
|
2018-11-27 06:23:22 -07:00
|
|
|
free_percpu(c->pcpu);
|
2023-12-27 20:09:25 -07:00
|
|
|
free_percpu(c->usage);
|
2019-11-09 14:01:15 -07:00
|
|
|
mempool_exit(&c->large_bkey_pool);
|
2017-03-16 23:18:50 -07:00
|
|
|
mempool_exit(&c->btree_bounce_pool);
|
|
|
|
bioset_exit(&c->btree_bio);
|
|
|
|
mempool_exit(&c->fill_iter);
|
2023-02-09 10:21:45 -07:00
|
|
|
#ifndef BCH_WRITE_REF_DEBUG
|
2017-03-16 23:18:50 -07:00
|
|
|
percpu_ref_exit(&c->writes);
|
2023-02-09 10:21:45 -07:00
|
|
|
#endif
|
2017-03-16 23:18:50 -07:00
|
|
|
kfree(rcu_dereference_protected(c->disk_groups, 1));
|
2019-04-04 18:53:12 -07:00
|
|
|
kfree(c->journal_seq_blacklist_table);
|
2020-11-02 21:51:33 -07:00
|
|
|
kfree(c->unused_inode_hints);
|
2017-03-16 23:18:50 -07:00
|
|
|
|
2023-03-23 11:09:05 -07:00
|
|
|
if (c->write_ref_wq)
|
|
|
|
destroy_workqueue(c->write_ref_wq);
|
2024-06-05 08:08:20 -07:00
|
|
|
if (c->btree_write_submit_wq)
|
|
|
|
destroy_workqueue(c->btree_write_submit_wq);
|
|
|
|
if (c->btree_read_complete_wq)
|
|
|
|
destroy_workqueue(c->btree_read_complete_wq);
|
2017-03-16 23:18:50 -07:00
|
|
|
if (c->copygc_wq)
|
|
|
|
destroy_workqueue(c->copygc_wq);
|
2021-07-10 10:44:42 -07:00
|
|
|
if (c->btree_io_complete_wq)
|
|
|
|
destroy_workqueue(c->btree_io_complete_wq);
|
2021-05-22 14:37:25 -07:00
|
|
|
if (c->btree_update_wq)
|
|
|
|
destroy_workqueue(c->btree_update_wq);
|
2017-03-16 23:18:50 -07:00
|
|
|
|
2021-04-06 11:00:56 -07:00
|
|
|
bch2_free_super(&c->disk_sb);
|
2024-02-01 04:35:46 -07:00
|
|
|
kvfree(c);
|
2017-03-16 23:18:50 -07:00
|
|
|
module_put(THIS_MODULE);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void bch2_fs_release(struct kobject *kobj)
|
|
|
|
{
|
|
|
|
struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);
|
|
|
|
|
2020-09-08 15:30:32 -07:00
|
|
|
__bch2_fs_free(c);
|
2017-03-16 23:18:50 -07:00
|
|
|
}
|
|
|
|
|
2020-09-08 15:30:32 -07:00
|
|
|
/*
 * First half of shutdown: go read-only, detach devices and sysfs/debugfs/
 * chardev interfaces, and quiesce background work.  The filesystem object
 * itself is freed later by bch2_fs_free().
 */
void __bch2_fs_stop(struct bch_fs *c)
{
	bch_verbose(c, "shutting down");

	set_bit(BCH_FS_stopping, &c->flags);

	down_write(&c->state_lock);
	bch2_fs_read_only(c);
	up_write(&c->state_lock);

	for_each_member_device(c, ca)
		bch2_dev_unlink(ca);

	if (c->kobj.state_in_sysfs)
		kobject_del(&c->kobj);

	bch2_fs_debug_exit(c);
	bch2_fs_chardev_exit(c);

	/* drop our ro ref and wait for all others to go away */
	bch2_ro_ref_put(c);
	wait_event(c->ro_ref_wait, !refcount_read(&c->ro_ref));

	kobject_put(&c->counters_kobj);
	kobject_put(&c->time_stats);
	kobject_put(&c->opts_dir);
	kobject_put(&c->internal);

	/* btree prefetch might have kicked off reads in the background: */
	bch2_btree_flush_all_reads(c);

	for_each_member_device(c, ca)
		cancel_work_sync(&ca->io_error_work);

	cancel_work_sync(&c->read_only_work);
}
|
|
|
|
|
|
|
|
/*
 * Second half of shutdown: remove @c from the global list, wait for all
 * outstanding closure references, free member devices, then drop the final
 * kobject reference (which triggers __bch2_fs_free() via bch2_fs_release()).
 *
 * Must be preceded by __bch2_fs_stop().
 */
void bch2_fs_free(struct bch_fs *c)
{
	unsigned i;

	mutex_lock(&bch_fs_list_lock);
	list_del(&c->list);
	mutex_unlock(&bch_fs_list_lock);

	/* wait for everyone holding a ref via bch2_dev_to_fs() etc. */
	closure_sync(&c->cl);
	closure_debug_destroy(&c->cl);

	for (i = 0; i < c->sb.nr_devices; i++) {
		struct bch_dev *ca = rcu_dereference_protected(c->devs[i], true);

		if (ca) {
			/* only our own ref should remain at this point */
			EBUG_ON(atomic_long_read(&ca->ref) != 1);
			bch2_free_super(&ca->disk_sb);
			bch2_dev_free(ca);
		}
	}

	bch_verbose(c, "shutdown complete");

	kobject_put(&c->kobj);
}
|
|
|
|
|
2020-09-08 15:30:32 -07:00
|
|
|
/* Full shutdown: stop (quiesce/detach) then free the filesystem. */
void bch2_fs_stop(struct bch_fs *c)
{
	__bch2_fs_stop(c);
	bch2_fs_free(c);
}
|
|
|
|
|
2021-11-05 18:28:17 -07:00
|
|
|
/*
 * Register a newly-allocated filesystem with the rest of the system:
 * reject duplicate UUIDs, create the chardev, debugfs and sysfs objects,
 * bring member devices online in sysfs, then add @c to the global list.
 *
 * Caller must hold bch_fs_list_lock.  Returns 0 or a negative error;
 * on error the caller is expected to tear the filesystem down.
 */
static int bch2_fs_online(struct bch_fs *c)
{
	int ret = 0;

	lockdep_assert_held(&bch_fs_list_lock);

	if (__bch2_uuid_to_fs(c->sb.uuid)) {
		bch_err(c, "filesystem UUID already open");
		return -EINVAL;
	}

	ret = bch2_fs_chardev_init(c);
	if (ret) {
		bch_err(c, "error creating character device");
		return ret;
	}

	bch2_fs_debug_init(c);

	/* ?: chain stops at the first kobject_add() failure */
	ret = kobject_add(&c->kobj, NULL, "%pU", c->sb.user_uuid.b) ?:
	    kobject_add(&c->internal, &c->kobj, "internal") ?:
	    kobject_add(&c->opts_dir, &c->kobj, "options") ?:
#ifndef CONFIG_BCACHEFS_NO_LATENCY_ACCT
	    kobject_add(&c->time_stats, &c->kobj, "time_stats") ?:
#endif
	    kobject_add(&c->counters_kobj, &c->kobj, "counters") ?:
	    bch2_opts_create_sysfs_files(&c->opts_dir);
	if (ret) {
		bch_err(c, "error creating sysfs objects");
		return ret;
	}

	down_write(&c->state_lock);

	for_each_member_device(c, ca) {
		ret = bch2_dev_sysfs_online(c, ca);
		if (ret) {
			bch_err(c, "error creating sysfs objects");
			/* drop the iterator's device ref before bailing */
			bch2_dev_put(ca);
			goto err;
		}
	}

	BUG_ON(!list_empty(&c->list));
	list_add(&c->list, &bch_fs_list);
err:
	up_write(&c->state_lock);
	return ret;
}
|
|
|
|
|
|
|
|
static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
|
|
|
|
{
|
|
|
|
struct bch_fs *c;
|
2023-02-03 19:01:40 -07:00
|
|
|
struct printbuf name = PRINTBUF;
|
2019-02-14 18:39:17 -07:00
|
|
|
unsigned i, iter_size;
|
2021-11-04 14:03:16 -07:00
|
|
|
int ret = 0;
|
2017-03-16 23:18:50 -07:00
|
|
|
|
2024-02-01 04:35:46 -07:00
|
|
|
c = kvmalloc(sizeof(struct bch_fs), GFP_KERNEL|__GFP_ZERO);
|
2021-11-04 14:03:16 -07:00
|
|
|
if (!c) {
|
2023-03-14 12:35:57 -07:00
|
|
|
c = ERR_PTR(-BCH_ERR_ENOMEM_fs_alloc);
|
2017-03-16 23:18:50 -07:00
|
|
|
goto out;
|
2021-11-04 14:03:16 -07:00
|
|
|
}
|
2017-03-16 23:18:50 -07:00
|
|
|
|
2023-12-31 08:04:54 -07:00
|
|
|
c->stdio = (void *)(unsigned long) opts.stdio;
|
2023-12-04 18:15:23 -07:00
|
|
|
|
2017-03-16 23:18:50 -07:00
|
|
|
__module_get(THIS_MODULE);
|
|
|
|
|
2020-10-15 12:58:36 -07:00
|
|
|
closure_init(&c->cl, NULL);
|
|
|
|
|
|
|
|
c->kobj.kset = bcachefs_kset;
|
|
|
|
kobject_init(&c->kobj, &bch2_fs_ktype);
|
|
|
|
kobject_init(&c->internal, &bch2_fs_internal_ktype);
|
|
|
|
kobject_init(&c->opts_dir, &bch2_fs_opts_dir_ktype);
|
|
|
|
kobject_init(&c->time_stats, &bch2_fs_time_stats_ktype);
|
2022-03-15 01:36:33 -07:00
|
|
|
kobject_init(&c->counters_kobj, &bch2_fs_counters_ktype);
|
2020-10-15 12:58:36 -07:00
|
|
|
|
2017-03-16 23:18:50 -07:00
|
|
|
c->minor = -1;
|
|
|
|
c->disk_sb.fs_sb = true;
|
|
|
|
|
2020-06-15 11:58:47 -07:00
|
|
|
init_rwsem(&c->state_lock);
|
2017-03-16 23:18:50 -07:00
|
|
|
mutex_init(&c->sb_lock);
|
|
|
|
mutex_init(&c->replicas_gc_lock);
|
|
|
|
mutex_init(&c->btree_root_lock);
|
|
|
|
INIT_WORK(&c->read_only_work, bch2_fs_read_only_work);
|
|
|
|
|
2023-12-06 14:26:18 -07:00
|
|
|
refcount_set(&c->ro_ref, 1);
|
|
|
|
init_waitqueue_head(&c->ro_ref_wait);
|
2023-12-04 11:45:33 -07:00
|
|
|
sema_init(&c->online_fsck_mutex, 1);
|
2023-12-06 14:26:18 -07:00
|
|
|
|
2017-03-16 23:18:50 -07:00
|
|
|
init_rwsem(&c->gc_lock);
|
2021-12-24 02:51:10 -07:00
|
|
|
mutex_init(&c->gc_gens_lock);
|
2023-11-17 21:13:49 -07:00
|
|
|
atomic_set(&c->journal_keys.ref, 1);
|
|
|
|
c->journal_keys.initial_ref_held = true;
|
2017-03-16 23:18:50 -07:00
|
|
|
|
|
|
|
for (i = 0; i < BCH_TIME_STAT_NR; i++)
|
|
|
|
bch2_time_stats_init(&c->times[i]);
|
|
|
|
|
2024-04-19 19:44:12 -07:00
|
|
|
bch2_fs_gc_init(c);
|
2020-07-11 13:28:54 -07:00
|
|
|
bch2_fs_copygc_init(c);
|
2019-03-07 17:46:10 -07:00
|
|
|
bch2_fs_btree_key_cache_init_early(&c->btree_key_cache);
|
2023-12-14 12:06:41 -07:00
|
|
|
bch2_fs_btree_iter_init_early(c);
|
2023-06-27 21:01:19 -07:00
|
|
|
bch2_fs_btree_interior_update_init_early(c);
|
2018-11-04 19:55:35 -07:00
|
|
|
bch2_fs_allocator_background_init(c);
|
|
|
|
bch2_fs_allocator_foreground_init(c);
|
2017-03-16 23:18:50 -07:00
|
|
|
bch2_fs_rebalance_init(c);
|
|
|
|
bch2_fs_quota_init(c);
|
2022-04-08 22:23:50 -07:00
|
|
|
bch2_fs_ec_init_early(c);
|
2023-03-11 18:38:46 -07:00
|
|
|
bch2_fs_move_init(c);
|
2023-10-25 12:51:16 -07:00
|
|
|
bch2_fs_sb_errors_init_early(c);
|
2017-03-16 23:18:50 -07:00
|
|
|
|
|
|
|
INIT_LIST_HEAD(&c->list);
|
|
|
|
|
|
|
|
mutex_init(&c->bio_bounce_pages_lock);
|
2021-03-15 21:42:25 -07:00
|
|
|
mutex_init(&c->snapshot_table_lock);
|
2023-09-28 22:15:33 -07:00
|
|
|
init_rwsem(&c->snapshot_create_lock);
|
2017-03-16 23:18:50 -07:00
|
|
|
|
|
|
|
spin_lock_init(&c->btree_write_error_lock);
|
|
|
|
|
2021-01-26 18:15:46 -07:00
|
|
|
INIT_LIST_HEAD(&c->journal_iters);
|
2020-03-25 13:12:33 -07:00
|
|
|
|
2023-10-25 12:51:16 -07:00
|
|
|
INIT_LIST_HEAD(&c->fsck_error_msgs);
|
|
|
|
mutex_init(&c->fsck_error_msgs_lock);
|
2017-03-16 23:18:50 -07:00
|
|
|
|
2019-02-10 17:34:47 -07:00
|
|
|
seqcount_init(&c->usage_lock);
|
|
|
|
|
2021-05-18 20:53:43 -07:00
|
|
|
sema_init(&c->io_in_flight, 128);
|
|
|
|
|
2023-03-15 08:53:51 -07:00
|
|
|
INIT_LIST_HEAD(&c->vfs_inodes_list);
|
|
|
|
mutex_init(&c->vfs_inodes_lock);
|
|
|
|
|
2017-03-16 23:18:50 -07:00
|
|
|
c->copy_gc_enabled = 1;
|
|
|
|
c->rebalance.enabled = 1;
|
|
|
|
|
2021-12-10 13:41:38 -07:00
|
|
|
c->journal.flush_write_time = &c->times[BCH_TIME_journal_flush_write];
|
|
|
|
c->journal.noflush_write_time = &c->times[BCH_TIME_journal_noflush_write];
|
|
|
|
c->journal.flush_seq_time = &c->times[BCH_TIME_journal_flush_seq];
|
2017-03-16 23:18:50 -07:00
|
|
|
|
|
|
|
bch2_fs_btree_cache_init_early(&c->btree_cache);
|
|
|
|
|
2020-12-03 12:17:33 -07:00
|
|
|
mutex_init(&c->sectors_available_lock);
|
|
|
|
|
2021-11-05 18:28:17 -07:00
|
|
|
ret = percpu_init_rwsem(&c->mark_lock);
|
|
|
|
if (ret)
|
2018-12-01 08:32:48 -07:00
|
|
|
goto err;
|
|
|
|
|
2017-03-16 23:18:50 -07:00
|
|
|
mutex_lock(&c->sb_lock);
|
2021-11-05 18:28:17 -07:00
|
|
|
ret = bch2_sb_to_fs(c, sb);
|
|
|
|
mutex_unlock(&c->sb_lock);
|
2017-03-16 23:18:50 -07:00
|
|
|
|
2021-11-05 18:28:17 -07:00
|
|
|
if (ret)
|
2017-03-16 23:18:50 -07:00
|
|
|
goto err;
|
|
|
|
|
2023-02-03 19:01:40 -07:00
|
|
|
pr_uuid(&name, c->sb.user_uuid.b);
|
2023-03-14 12:35:57 -07:00
|
|
|
ret = name.allocation_failure ? -BCH_ERR_ENOMEM_fs_name_alloc : 0;
|
2023-02-03 19:01:40 -07:00
|
|
|
if (ret)
|
|
|
|
goto err;
|
2017-03-16 23:18:50 -07:00
|
|
|
|
2024-03-03 20:22:03 -07:00
|
|
|
strscpy(c->name, name.buf, sizeof(c->name));
|
|
|
|
printbuf_exit(&name);
|
|
|
|
|
2021-12-04 18:07:19 -07:00
|
|
|
/* Compat: */
|
2023-07-06 19:47:42 -07:00
|
|
|
if (le16_to_cpu(sb->version) <= bcachefs_metadata_version_inode_v2 &&
|
2021-12-04 18:07:19 -07:00
|
|
|
!BCH_SB_JOURNAL_FLUSH_DELAY(sb))
|
|
|
|
SET_BCH_SB_JOURNAL_FLUSH_DELAY(sb, 1000);
|
|
|
|
|
2023-07-06 19:47:42 -07:00
|
|
|
if (le16_to_cpu(sb->version) <= bcachefs_metadata_version_inode_v2 &&
|
2021-12-04 18:07:19 -07:00
|
|
|
!BCH_SB_JOURNAL_RECLAIM_DELAY(sb))
|
|
|
|
SET_BCH_SB_JOURNAL_RECLAIM_DELAY(sb, 100);
|
|
|
|
|
2017-03-16 23:18:50 -07:00
|
|
|
c->opts = bch2_opts_default;
|
2021-12-14 12:24:41 -07:00
|
|
|
ret = bch2_opts_from_sb(&c->opts, sb);
|
|
|
|
if (ret)
|
|
|
|
goto err;
|
|
|
|
|
2017-03-16 23:18:50 -07:00
|
|
|
bch2_opts_apply(&c->opts, opts);
|
|
|
|
|
2022-01-12 00:13:21 -07:00
|
|
|
c->btree_key_cache_btrees |= 1U << BTREE_ID_alloc;
|
|
|
|
if (c->opts.inodes_use_key_cache)
|
|
|
|
c->btree_key_cache_btrees |= 1U << BTREE_ID_inodes;
|
2023-08-27 15:27:41 -07:00
|
|
|
c->btree_key_cache_btrees |= 1U << BTREE_ID_logged_ops;
|
2022-01-12 00:13:21 -07:00
|
|
|
|
2021-12-14 12:24:41 -07:00
|
|
|
c->block_bits = ilog2(block_sectors(c));
|
2017-03-16 23:18:50 -07:00
|
|
|
c->btree_foreground_merge_threshold = BTREE_FOREGROUND_MERGE_THRESHOLD(c);
|
|
|
|
|
2021-11-04 14:03:16 -07:00
|
|
|
if (bch2_fs_init_fault("fs_alloc")) {
|
2021-11-05 18:28:17 -07:00
|
|
|
bch_err(c, "fs_alloc fault injected");
|
|
|
|
ret = -EFAULT;
|
2017-03-16 23:18:50 -07:00
|
|
|
goto err;
|
2021-11-04 14:03:16 -07:00
|
|
|
}
|
2017-03-16 23:18:50 -07:00
|
|
|
|
2019-12-14 14:20:33 -07:00
|
|
|
iter_size = sizeof(struct sort_iter) +
|
2017-03-16 23:18:50 -07:00
|
|
|
(btree_blocks(c) + 1) * 2 *
|
2019-12-14 14:20:33 -07:00
|
|
|
sizeof(struct sort_iter_set);
|
2017-03-16 23:18:50 -07:00
|
|
|
|
2020-11-02 21:51:33 -07:00
|
|
|
c->inode_shard_bits = ilog2(roundup_pow_of_two(num_possible_cpus()));
|
|
|
|
|
2021-05-22 14:37:25 -07:00
|
|
|
if (!(c->btree_update_wq = alloc_workqueue("bcachefs",
|
2024-01-22 18:55:08 -07:00
|
|
|
WQ_HIGHPRI|WQ_FREEZABLE|WQ_MEM_RECLAIM|WQ_UNBOUND, 512)) ||
|
2021-07-10 10:44:42 -07:00
|
|
|
!(c->btree_io_complete_wq = alloc_workqueue("bcachefs_btree_io",
|
2024-01-22 18:55:08 -07:00
|
|
|
WQ_HIGHPRI|WQ_FREEZABLE|WQ_MEM_RECLAIM, 1)) ||
|
2020-11-02 15:51:38 -07:00
|
|
|
!(c->copygc_wq = alloc_workqueue("bcachefs_copygc",
|
2024-01-22 18:55:08 -07:00
|
|
|
WQ_HIGHPRI|WQ_FREEZABLE|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE, 1)) ||
|
2024-06-05 08:08:20 -07:00
|
|
|
!(c->btree_read_complete_wq = alloc_workqueue("bcachefs_btree_read_complete",
|
2024-01-22 18:55:08 -07:00
|
|
|
WQ_HIGHPRI|WQ_FREEZABLE|WQ_MEM_RECLAIM, 512)) ||
|
2024-06-05 08:08:20 -07:00
|
|
|
!(c->btree_write_submit_wq = alloc_workqueue("bcachefs_btree_write_sumit",
|
|
|
|
WQ_HIGHPRI|WQ_FREEZABLE|WQ_MEM_RECLAIM, 1)) ||
|
2023-03-23 11:09:05 -07:00
|
|
|
!(c->write_ref_wq = alloc_workqueue("bcachefs_write_ref",
|
|
|
|
WQ_FREEZABLE, 0)) ||
|
2023-02-09 10:21:45 -07:00
|
|
|
#ifndef BCH_WRITE_REF_DEBUG
|
2019-03-21 19:19:57 -07:00
|
|
|
percpu_ref_init(&c->writes, bch2_writes_disabled,
|
|
|
|
PERCPU_REF_INIT_DEAD, GFP_KERNEL) ||
|
2023-02-09 10:21:45 -07:00
|
|
|
#endif
|
2017-03-16 23:18:50 -07:00
|
|
|
mempool_init_kmalloc_pool(&c->fill_iter, 1, iter_size) ||
|
|
|
|
bioset_init(&c->btree_bio, 1,
|
|
|
|
max(offsetof(struct btree_read_bio, bio),
|
|
|
|
offsetof(struct btree_write_bio, wbio.bio)),
|
|
|
|
BIOSET_NEED_BVECS) ||
|
2018-11-27 06:23:22 -07:00
|
|
|
!(c->pcpu = alloc_percpu(struct bch_fs_pcpu)) ||
|
2023-12-27 20:09:25 -07:00
|
|
|
!(c->usage = alloc_percpu(struct bch_fs_usage_base)) ||
|
2019-02-10 17:34:47 -07:00
|
|
|
!(c->online_reserved = alloc_percpu(u64)) ||
|
2024-02-01 04:35:46 -07:00
|
|
|
mempool_init_kvmalloc_pool(&c->btree_bounce_pool, 1,
|
|
|
|
c->opts.btree_node_size) ||
|
2019-11-09 14:01:15 -07:00
|
|
|
mempool_init_kmalloc_pool(&c->large_bkey_pool, 1, 2048) ||
|
2020-11-02 21:51:33 -07:00
|
|
|
!(c->unused_inode_hints = kcalloc(1U << c->inode_shard_bits,
|
2021-11-05 18:28:17 -07:00
|
|
|
sizeof(u64), GFP_KERNEL))) {
|
2023-03-14 12:35:57 -07:00
|
|
|
ret = -BCH_ERR_ENOMEM_fs_other_alloc;
|
2021-11-04 14:03:16 -07:00
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
|
2022-09-25 19:26:48 -07:00
|
|
|
ret = bch2_fs_counters_init(c) ?:
|
2023-10-25 12:51:16 -07:00
|
|
|
bch2_fs_sb_errors_init(c) ?:
|
2022-09-25 19:26:48 -07:00
|
|
|
bch2_io_clock_init(&c->io_clock[READ]) ?:
|
2021-11-05 18:28:17 -07:00
|
|
|
bch2_io_clock_init(&c->io_clock[WRITE]) ?:
|
|
|
|
bch2_fs_journal_init(&c->journal) ?:
|
2024-06-12 16:28:13 -07:00
|
|
|
bch2_fs_btree_iter_init(c) ?:
|
2021-11-05 18:28:17 -07:00
|
|
|
bch2_fs_btree_cache_init(c) ?:
|
|
|
|
bch2_fs_btree_key_cache_init(&c->btree_key_cache) ?:
|
|
|
|
bch2_fs_btree_interior_update_init(c) ?:
|
2022-05-28 13:21:01 -07:00
|
|
|
bch2_fs_buckets_waiting_for_journal_init(c) ?:
|
bcachefs: Btree write buffer
This adds a new method of doing btree updates - a straight write buffer,
implemented as a flat fixed size array.
This is only useful when we don't need to read from the btree in order
to do the update, and when reading is infrequent - perfect for the LRU
btree.
This will make LRU btree updates fast enough that we'll be able to use
it for persistently indexing buckets by fragmentation, which will be a
massive boost to copygc performance.
Changes:
- A new btree_insert_type enum, for btree_insert_entries. Specifies
btree, btree key cache, or btree write buffer.
- bch2_trans_update_buffered(): updates via the btree write buffer
don't need a btree path, so we need a new update path.
- Transaction commit path changes:
The update to the btree write buffer both mutates global, and can
fail if there isn't currently room. Therefore we do all write buffer
updates in the transaction all at once, and also if it fails we have
to revert filesystem usage counter changes.
If there isn't room we flush the write buffer in the transaction
commit error path and retry.
- A new persistent option, for specifying the number of entries in the
write buffer.
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
2023-01-03 22:00:50 -07:00
|
|
|
bch2_fs_btree_write_buffer_init(c) ?:
|
2021-11-05 18:28:17 -07:00
|
|
|
bch2_fs_subvolumes_init(c) ?:
|
2023-09-10 15:05:17 -07:00
|
|
|
bch2_fs_io_read_init(c) ?:
|
|
|
|
bch2_fs_io_write_init(c) ?:
|
2022-12-14 18:52:11 -07:00
|
|
|
bch2_fs_nocow_locking_init(c) ?:
|
2021-11-05 18:28:17 -07:00
|
|
|
bch2_fs_encryption_init(c) ?:
|
|
|
|
bch2_fs_compress_init(c) ?:
|
|
|
|
bch2_fs_ec_init(c) ?:
|
2024-06-08 18:41:01 -07:00
|
|
|
bch2_fs_vfs_init(c) ?:
|
2023-08-03 15:18:21 -07:00
|
|
|
bch2_fs_fsio_init(c) ?:
|
2023-09-14 02:47:44 -07:00
|
|
|
bch2_fs_fs_io_buffered_init(c) ?:
|
2023-08-03 15:18:21 -07:00
|
|
|
bch2_fs_fs_io_direct_init(c);
|
2021-11-04 14:03:16 -07:00
|
|
|
if (ret)
|
2017-03-16 23:18:50 -07:00
|
|
|
goto err;
|
|
|
|
|
2024-05-26 19:52:22 -07:00
|
|
|
for (i = 0; i < c->sb.nr_devices; i++) {
|
|
|
|
if (!bch2_member_exists(c->disk_sb.sb, i))
|
|
|
|
continue;
|
|
|
|
ret = bch2_dev_alloc(c, i);
|
|
|
|
if (ret)
|
2017-03-16 23:18:50 -07:00
|
|
|
goto err;
|
2024-05-26 19:52:22 -07:00
|
|
|
}
|
2017-03-16 23:18:50 -07:00
|
|
|
|
2021-02-03 11:10:55 -07:00
|
|
|
bch2_journal_entry_res_resize(&c->journal,
|
|
|
|
&c->btree_root_journal_res,
|
|
|
|
BTREE_ID_NR * (JSET_KEYS_U64s + BKEY_BTREE_PTR_U64s_MAX));
|
|
|
|
bch2_journal_entry_res_resize(&c->journal,
|
|
|
|
&c->clock_journal_res,
|
|
|
|
(sizeof(struct jset_entry_clock) / sizeof(u64)) * 2);
|
|
|
|
|
2017-03-16 23:18:50 -07:00
|
|
|
mutex_lock(&bch_fs_list_lock);
|
2021-11-05 18:28:17 -07:00
|
|
|
ret = bch2_fs_online(c);
|
2017-03-16 23:18:50 -07:00
|
|
|
mutex_unlock(&bch_fs_list_lock);
|
2021-11-05 18:28:17 -07:00
|
|
|
|
|
|
|
if (ret)
|
2017-03-16 23:18:50 -07:00
|
|
|
goto err;
|
|
|
|
out:
|
|
|
|
return c;
|
|
|
|
err:
|
|
|
|
bch2_fs_free(c);
|
2021-11-04 14:03:16 -07:00
|
|
|
c = ERR_PTR(ret);
|
2017-03-16 23:18:50 -07:00
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
2019-04-17 15:21:19 -07:00
|
|
|
noinline_for_stack
|
|
|
|
static void print_mount_opts(struct bch_fs *c)
|
|
|
|
{
|
|
|
|
enum bch_opt_id i;
|
2022-02-25 11:18:19 -07:00
|
|
|
struct printbuf p = PRINTBUF;
|
2019-04-17 15:21:19 -07:00
|
|
|
bool first = true;
|
|
|
|
|
2024-06-29 08:43:23 -07:00
|
|
|
prt_str(&p, "starting version ");
|
2023-06-28 16:53:05 -07:00
|
|
|
bch2_version_to_text(&p, c->sb.version);
|
2022-12-20 17:27:02 -07:00
|
|
|
|
2019-04-17 15:21:19 -07:00
|
|
|
if (c->opts.read_only) {
|
2022-12-20 17:27:02 -07:00
|
|
|
prt_str(&p, " opts=");
|
2019-04-17 15:21:19 -07:00
|
|
|
first = false;
|
2022-12-20 17:27:02 -07:00
|
|
|
prt_printf(&p, "ro");
|
2019-04-17 15:21:19 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
for (i = 0; i < bch2_opts_nr; i++) {
|
|
|
|
const struct bch_option *opt = &bch2_opt_table[i];
|
|
|
|
u64 v = bch2_opt_get_by_id(&c->opts, i);
|
|
|
|
|
2021-12-14 12:24:41 -07:00
|
|
|
if (!(opt->flags & OPT_MOUNT))
|
2019-04-17 15:21:19 -07:00
|
|
|
continue;
|
|
|
|
|
|
|
|
if (v == bch2_opt_get_by_id(&bch2_opts_default, i))
|
|
|
|
continue;
|
|
|
|
|
2022-12-20 17:27:02 -07:00
|
|
|
prt_str(&p, first ? " opts=" : ",");
|
2019-04-17 15:21:19 -07:00
|
|
|
first = false;
|
2022-03-05 10:01:16 -07:00
|
|
|
bch2_opt_to_text(&p, c, c->disk_sb.sb, opt, v, OPT_SHOW_MOUNT_STYLE);
|
2019-04-17 15:21:19 -07:00
|
|
|
}
|
|
|
|
|
2022-12-20 17:27:02 -07:00
|
|
|
bch_info(c, "%s", p.buf);
|
2022-02-25 11:18:19 -07:00
|
|
|
printbuf_exit(&p);
|
2019-04-17 15:21:19 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Start a fully-allocated filesystem: stamp member last-mount times, run
 * recovery (or initialize a brand-new filesystem), then transition to
 * read-only or read-write per mount options.
 *
 * Returns 0 on success, negative error otherwise. Holds state_lock for the
 * whole sequence.
 */
int bch2_fs_start(struct bch_fs *c)
{
	time64_t now = ktime_get_real_seconds();
	int ret;

	print_mount_opts(c);

	down_write(&c->state_lock);

	BUG_ON(test_bit(BCH_FS_started, &c->flags));

	mutex_lock(&c->sb_lock);

	ret = bch2_sb_members_v2_init(c);
	if (ret) {
		mutex_unlock(&c->sb_lock);
		goto err;
	}

	/* Record mount time in each online member's superblock entry: */
	for_each_online_member(c, ca)
		bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx)->last_mount = cpu_to_le64(now);

	struct bch_sb_field_ext *ext =
		bch2_sb_field_get_minsize(&c->disk_sb, ext, sizeof(*ext) / sizeof(u64));
	mutex_unlock(&c->sb_lock);

	if (!ext) {
		bch_err(c, "insufficient space in superblock for sb_field_ext");
		ret = -BCH_ERR_ENOSPC_sb;
		goto err;
	}

	for_each_rw_member(c, ca)
		bch2_dev_allocator_add(c, ca);
	bch2_recalc_capacity(c);

	/* Existing filesystem -> recovery; fresh one -> initialize: */
	ret = BCH_SB_INITIALIZED(c->disk_sb.sb)
		? bch2_fs_recovery(c)
		: bch2_fs_initialize(c);
	if (ret)
		goto err;

	ret = bch2_opts_check_may_set(c);
	if (ret)
		goto err;

	if (bch2_fs_init_fault("fs_start")) {
		bch_err(c, "fs_start fault injected");
		ret = -EINVAL;
		goto err;
	}

	set_bit(BCH_FS_started, &c->flags);

	if (c->opts.read_only) {
		bch2_fs_read_only(c);
	} else {
		/* Recovery may have already gone rw; finish late setup then: */
		ret = !test_bit(BCH_FS_rw, &c->flags)
			? bch2_fs_read_write(c)
			: bch2_fs_read_write_late(c);
		if (ret)
			goto err;
	}

	ret = 0;
err:
	if (ret)
		bch_err_msg(c, ret, "starting filesystem");
	else
		bch_verbose(c, "done starting filesystem");
	up_write(&c->state_lock);
	return ret;
}
|
|
|
|
|
2022-11-19 20:39:08 -07:00
|
|
|
static int bch2_dev_may_add(struct bch_sb *sb, struct bch_fs *c)
|
2017-03-16 23:18:50 -07:00
|
|
|
{
|
2023-09-24 20:55:37 -07:00
|
|
|
struct bch_member m = bch2_sb_member_get(sb, sb->dev_idx);
|
2017-03-16 23:18:50 -07:00
|
|
|
|
2021-12-14 12:24:41 -07:00
|
|
|
if (le16_to_cpu(sb->block_size) != block_sectors(c))
|
2022-11-19 20:39:08 -07:00
|
|
|
return -BCH_ERR_mismatched_block_size;
|
2017-03-16 23:18:50 -07:00
|
|
|
|
2023-09-24 20:55:37 -07:00
|
|
|
if (le16_to_cpu(m.bucket_size) <
|
2017-03-16 23:18:50 -07:00
|
|
|
BCH_SB_BTREE_NODE_SIZE(c->disk_sb.sb))
|
2022-11-19 20:39:08 -07:00
|
|
|
return -BCH_ERR_bucket_size_too_small;
|
2017-03-16 23:18:50 -07:00
|
|
|
|
2022-11-19 20:39:08 -07:00
|
|
|
return 0;
|
2017-03-16 23:18:50 -07:00
|
|
|
}
|
|
|
|
|
2023-06-27 18:02:27 -07:00
|
|
|
static int bch2_dev_in_fs(struct bch_sb_handle *fs,
|
2024-03-08 14:03:19 -07:00
|
|
|
struct bch_sb_handle *sb,
|
|
|
|
struct bch_opts *opts)
|
2017-03-16 23:18:50 -07:00
|
|
|
{
|
2023-06-27 18:02:27 -07:00
|
|
|
if (fs == sb)
|
|
|
|
return 0;
|
2017-03-16 23:18:50 -07:00
|
|
|
|
2023-06-27 18:02:27 -07:00
|
|
|
if (!uuid_equal(&fs->sb->uuid, &sb->sb->uuid))
|
2022-11-19 20:39:08 -07:00
|
|
|
return -BCH_ERR_device_not_a_member_of_filesystem;
|
2017-03-16 23:18:50 -07:00
|
|
|
|
2024-04-11 20:31:55 -07:00
|
|
|
if (!bch2_member_exists(fs->sb, sb->sb->dev_idx))
|
2022-11-19 20:39:08 -07:00
|
|
|
return -BCH_ERR_device_has_been_removed;
|
2017-03-16 23:18:50 -07:00
|
|
|
|
2023-06-27 18:02:27 -07:00
|
|
|
if (fs->sb->block_size != sb->sb->block_size)
|
2022-11-19 20:39:08 -07:00
|
|
|
return -BCH_ERR_mismatched_block_size;
|
2017-03-16 23:18:50 -07:00
|
|
|
|
2023-06-27 18:02:27 -07:00
|
|
|
if (le16_to_cpu(fs->sb->version) < bcachefs_metadata_version_member_seq ||
|
|
|
|
le16_to_cpu(sb->sb->version) < bcachefs_metadata_version_member_seq)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (fs->sb->seq == sb->sb->seq &&
|
|
|
|
fs->sb->write_time != sb->sb->write_time) {
|
|
|
|
struct printbuf buf = PRINTBUF;
|
|
|
|
|
2024-01-05 09:58:50 -07:00
|
|
|
prt_str(&buf, "Split brain detected between ");
|
|
|
|
prt_bdevname(&buf, sb->bdev);
|
|
|
|
prt_str(&buf, " and ");
|
|
|
|
prt_bdevname(&buf, fs->bdev);
|
|
|
|
prt_char(&buf, ':');
|
2023-06-27 18:02:27 -07:00
|
|
|
prt_newline(&buf);
|
|
|
|
prt_printf(&buf, "seq=%llu but write_time different, got", le64_to_cpu(sb->sb->seq));
|
|
|
|
prt_newline(&buf);
|
|
|
|
|
2024-01-05 09:58:50 -07:00
|
|
|
prt_bdevname(&buf, fs->bdev);
|
|
|
|
prt_char(&buf, ' ');
|
2023-06-27 18:02:27 -07:00
|
|
|
bch2_prt_datetime(&buf, le64_to_cpu(fs->sb->write_time));;
|
|
|
|
prt_newline(&buf);
|
|
|
|
|
2024-01-05 09:58:50 -07:00
|
|
|
prt_bdevname(&buf, sb->bdev);
|
|
|
|
prt_char(&buf, ' ');
|
2023-06-27 18:02:27 -07:00
|
|
|
bch2_prt_datetime(&buf, le64_to_cpu(sb->sb->write_time));;
|
|
|
|
prt_newline(&buf);
|
|
|
|
|
2024-03-08 14:03:19 -07:00
|
|
|
if (!opts->no_splitbrain_check)
|
|
|
|
prt_printf(&buf, "Not using older sb");
|
2023-06-27 18:02:27 -07:00
|
|
|
|
|
|
|
pr_err("%s", buf.buf);
|
|
|
|
printbuf_exit(&buf);
|
2024-03-08 14:03:19 -07:00
|
|
|
|
|
|
|
if (!opts->no_splitbrain_check)
|
|
|
|
return -BCH_ERR_device_splitbrain;
|
2023-06-27 18:02:27 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
struct bch_member m = bch2_sb_member_get(fs->sb, sb->sb->dev_idx);
|
|
|
|
u64 seq_from_fs = le64_to_cpu(m.seq);
|
|
|
|
u64 seq_from_member = le64_to_cpu(sb->sb->seq);
|
|
|
|
|
|
|
|
if (seq_from_fs && seq_from_fs < seq_from_member) {
|
2024-01-05 09:58:50 -07:00
|
|
|
struct printbuf buf = PRINTBUF;
|
|
|
|
|
|
|
|
prt_str(&buf, "Split brain detected between ");
|
|
|
|
prt_bdevname(&buf, sb->bdev);
|
|
|
|
prt_str(&buf, " and ");
|
|
|
|
prt_bdevname(&buf, fs->bdev);
|
|
|
|
prt_char(&buf, ':');
|
|
|
|
prt_newline(&buf);
|
|
|
|
|
|
|
|
prt_bdevname(&buf, fs->bdev);
|
2024-01-26 22:31:13 -07:00
|
|
|
prt_str(&buf, " believes seq of ");
|
2024-01-05 09:58:50 -07:00
|
|
|
prt_bdevname(&buf, sb->bdev);
|
|
|
|
prt_printf(&buf, " to be %llu, but ", seq_from_fs);
|
|
|
|
prt_bdevname(&buf, sb->bdev);
|
|
|
|
prt_printf(&buf, " has %llu\n", seq_from_member);
|
2024-03-08 14:03:19 -07:00
|
|
|
|
|
|
|
if (!opts->no_splitbrain_check) {
|
|
|
|
prt_str(&buf, "Not using ");
|
|
|
|
prt_bdevname(&buf, sb->bdev);
|
|
|
|
}
|
2024-01-05 09:58:50 -07:00
|
|
|
|
|
|
|
pr_err("%s", buf.buf);
|
|
|
|
printbuf_exit(&buf);
|
2024-03-08 14:03:19 -07:00
|
|
|
|
|
|
|
if (!opts->no_splitbrain_check)
|
|
|
|
return -BCH_ERR_device_splitbrain;
|
2023-06-27 18:02:27 -07:00
|
|
|
}
|
|
|
|
|
2022-11-19 20:39:08 -07:00
|
|
|
return 0;
|
2017-03-16 23:18:50 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Device startup/shutdown: */
|
|
|
|
|
|
|
|
static void bch2_dev_release(struct kobject *kobj)
|
|
|
|
{
|
|
|
|
struct bch_dev *ca = container_of(kobj, struct bch_dev, kobj);
|
|
|
|
|
|
|
|
kfree(ca);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Free a member device: flush pending error work, remove sysfs links and
 * kobject, release the superblock, shut down allocator/journal state, free
 * percpu and bucket data, and drop the final kobject ref (which frees @ca
 * via bch2_dev_release()).
 */
static void bch2_dev_free(struct bch_dev *ca)
{
	cancel_work_sync(&ca->io_error_work);

	bch2_dev_unlink(ca);

	if (ca->kobj.state_in_sysfs)
		kobject_del(&ca->kobj);

	bch2_free_super(&ca->disk_sb);
	bch2_dev_allocator_background_exit(ca);
	bch2_dev_journal_exit(ca);

	free_percpu(ca->io_done);
	bch2_dev_buckets_free(ca);
	free_page((unsigned long) ca->sb_read_scratch);

	bch2_time_stats_quantiles_exit(&ca->io_latency[WRITE]);
	bch2_time_stats_quantiles_exit(&ca->io_latency[READ]);

	percpu_ref_exit(&ca->io_ref);
#ifndef CONFIG_BCACHEFS_DEBUG
	/* In debug builds ca->ref is a plain atomic, not a percpu ref: */
	percpu_ref_exit(&ca->ref);
#endif
	kobject_put(&ca->kobj);
}
|
|
|
|
|
|
|
|
/*
 * Take a member device offline: stop writes to it, kill its io ref and wait
 * for outstanding I/O to drain, then release its sysfs links, superblock and
 * journal state. No-op if the device is already offline (io_ref at zero).
 *
 * Caller holds c->state_lock.
 */
static void __bch2_dev_offline(struct bch_fs *c, struct bch_dev *ca)
{

	lockdep_assert_held(&c->state_lock);

	if (percpu_ref_is_zero(&ca->io_ref))
		return;

	__bch2_dev_read_only(c, ca);

	/* Kill the io ref and wait for all in-flight I/O to complete: */
	reinit_completion(&ca->io_ref_completion);
	percpu_ref_kill(&ca->io_ref);
	wait_for_completion(&ca->io_ref_completion);

	bch2_dev_unlink(ca);

	bch2_free_super(&ca->disk_sb);
	bch2_dev_journal_exit(ca);
}
|
|
|
|
|
2024-05-03 15:07:40 -07:00
|
|
|
#ifndef CONFIG_BCACHEFS_DEBUG
/* percpu_ref release callback for ca->ref: wakes waiters on the last put */
static void bch2_dev_ref_complete(struct percpu_ref *ref)
{
	struct bch_dev *ca = container_of(ref, struct bch_dev, ref);

	complete(&ca->ref_completion);
}
#endif
|
2017-03-16 23:18:50 -07:00
|
|
|
|
|
|
|
static void bch2_dev_io_ref_complete(struct percpu_ref *ref)
|
|
|
|
{
|
|
|
|
struct bch_dev *ca = container_of(ref, struct bch_dev, io_ref);
|
|
|
|
|
|
|
|
complete(&ca->io_ref_completion);
|
|
|
|
}
|
|
|
|
|
2024-10-12 11:36:38 -07:00
|
|
|
/*
 * Remove the bidirectional sysfs symlinks between the device's kobject and
 * its underlying block device ("bcachefs" under the bdev, "block" under the
 * bch_dev). Safe to call when the links were never created.
 */
static void bch2_dev_unlink(struct bch_dev *ca)
{
	struct kobject *b;

	/*
	 * This is racy w.r.t. the underlying block device being hot-removed,
	 * which removes it from sysfs.
	 *
	 * It'd be lovely if we had a way to handle this race, but the sysfs
	 * code doesn't appear to provide a good method and block/holder.c is
	 * susceptible as well:
	 */
	if (ca->kobj.state_in_sysfs &&
	    ca->disk_sb.bdev &&
	    (b = bdev_kobj(ca->disk_sb.bdev))->state_in_sysfs) {
		sysfs_remove_link(b, "bcachefs");
		sysfs_remove_link(&ca->kobj, "block");
	}
}
|
|
|
|
|
2017-03-16 23:18:50 -07:00
|
|
|
static int bch2_dev_sysfs_online(struct bch_fs *c, struct bch_dev *ca)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
if (!c->kobj.state_in_sysfs)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (!ca->kobj.state_in_sysfs) {
|
|
|
|
ret = kobject_add(&ca->kobj, &c->kobj,
|
|
|
|
"dev-%u", ca->dev_idx);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (ca->disk_sb.bdev) {
|
|
|
|
struct kobject *block = bdev_kobj(ca->disk_sb.bdev);
|
|
|
|
|
|
|
|
ret = sysfs_create_link(block, &ca->kobj, "bcachefs");
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
ret = sysfs_create_link(&ca->kobj, block, "block");
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Allocate and initialize a struct bch_dev from an on-disk member entry.
 * Initializes kobject, completions, locks, latency stats, cached member
 * info, error counters and reference counts, then the fallible allocations
 * (io ref, sb scratch page, bucket arrays, percpu io accounting).
 *
 * Returns the new device, or NULL on allocation failure (partial state is
 * released via bch2_dev_free()).
 */
static struct bch_dev *__bch2_dev_alloc(struct bch_fs *c,
					struct bch_member *member)
{
	struct bch_dev *ca;
	unsigned i;

	ca = kzalloc(sizeof(*ca), GFP_KERNEL);
	if (!ca)
		return NULL;

	kobject_init(&ca->kobj, &bch2_dev_ktype);
	init_completion(&ca->ref_completion);
	init_completion(&ca->io_ref_completion);

	init_rwsem(&ca->bucket_lock);

	INIT_WORK(&ca->io_error_work, bch2_io_error_work);

	bch2_time_stats_quantiles_init(&ca->io_latency[READ]);
	bch2_time_stats_quantiles_init(&ca->io_latency[WRITE]);

	ca->mi = bch2_mi_to_cpu(member);

	/* Load persistent per-device error counters from the superblock: */
	for (i = 0; i < ARRAY_SIZE(member->errors); i++)
		atomic64_set(&ca->errors[i], le64_to_cpu(member->errors[i]));

	ca->uuid = member->uuid;

	ca->nr_btree_reserve = DIV_ROUND_UP(BTREE_NODE_RESERVE,
			     ca->mi.bucket_size / btree_sectors(c));

#ifndef CONFIG_BCACHEFS_DEBUG
	if (percpu_ref_init(&ca->ref, bch2_dev_ref_complete, 0, GFP_KERNEL))
		goto err;
#else
	/* Debug builds use a plain atomic refcount for easier tracking: */
	atomic_long_set(&ca->ref, 1);
#endif

	bch2_dev_allocator_background_init(ca);

	/* io_ref starts dead; it's reinitialized when the bdev is attached: */
	if (percpu_ref_init(&ca->io_ref, bch2_dev_io_ref_complete,
			    PERCPU_REF_INIT_DEAD, GFP_KERNEL) ||
	    !(ca->sb_read_scratch = (void *) __get_free_page(GFP_KERNEL)) ||
	    bch2_dev_buckets_alloc(c, ca) ||
	    !(ca->io_done = alloc_percpu(*ca->io_done)))
		goto err;

	return ca;
err:
	bch2_dev_free(ca);
	return NULL;
}
|
|
|
|
|
|
|
|
static void bch2_dev_attach(struct bch_fs *c, struct bch_dev *ca,
|
|
|
|
unsigned dev_idx)
|
|
|
|
{
|
|
|
|
ca->dev_idx = dev_idx;
|
|
|
|
__set_bit(ca->dev_idx, ca->self.d);
|
|
|
|
scnprintf(ca->name, sizeof(ca->name), "dev-%u", dev_idx);
|
|
|
|
|
|
|
|
ca->fs = c;
|
|
|
|
rcu_assign_pointer(c->devs[ca->dev_idx], ca);
|
|
|
|
|
|
|
|
if (bch2_dev_sysfs_online(c, ca))
|
|
|
|
pr_warn("error creating sysfs objects");
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Allocate the member device at @dev_idx from the filesystem superblock and
 * attach it to the filesystem. Returns 0 on success or
 * -BCH_ERR_ENOMEM_dev_alloc on failure (including injected faults).
 */
static int bch2_dev_alloc(struct bch_fs *c, unsigned dev_idx)
{
	struct bch_member member = bch2_sb_member_get(c->disk_sb.sb, dev_idx);
	struct bch_dev *ca = NULL;
	int ret = 0;

	if (bch2_fs_init_fault("dev_alloc"))
		goto err;

	ca = __bch2_dev_alloc(c, &member);
	if (!ca)
		goto err;

	ca->fs = c;

	bch2_dev_attach(c, ca, dev_idx);
	return ret;
err:
	if (ca)
		bch2_dev_free(ca);
	return -BCH_ERR_ENOMEM_dev_alloc;
}
|
|
|
|
|
|
|
|
static int __bch2_dev_attach_bdev(struct bch_dev *ca, struct bch_sb_handle *sb)
|
|
|
|
{
|
|
|
|
unsigned ret;
|
|
|
|
|
|
|
|
if (bch2_dev_is_online(ca)) {
|
|
|
|
bch_err(ca, "already have device online in slot %u",
|
|
|
|
sb->sb->dev_idx);
|
2022-11-19 20:39:08 -07:00
|
|
|
return -BCH_ERR_device_already_online;
|
2017-03-16 23:18:50 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
if (get_capacity(sb->bdev->bd_disk) <
|
|
|
|
ca->mi.bucket_size * ca->mi.nbuckets) {
|
|
|
|
bch_err(ca, "cannot online: device too small");
|
2022-11-19 20:39:08 -07:00
|
|
|
return -BCH_ERR_device_size_too_small;
|
2017-03-16 23:18:50 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
BUG_ON(!percpu_ref_is_zero(&ca->io_ref));
|
|
|
|
|
|
|
|
ret = bch2_dev_journal_init(ca, sb->sb);
|
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
/* Commit: */
|
|
|
|
ca->disk_sb = *sb;
|
|
|
|
memset(sb, 0, sizeof(*sb));
|
|
|
|
|
2022-01-02 19:45:35 -07:00
|
|
|
ca->dev = ca->disk_sb.bdev->bd_dev;
|
|
|
|
|
2017-03-16 23:18:50 -07:00
|
|
|
percpu_ref_reinit(&ca->io_ref);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Attach an opened superblock/bdev to the filesystem: locate the matching
 * member device, hand the handle to it, bring up its sysfs objects, and
 * refresh the cached device/filesystem names.
 *
 * Caller must hold c->state_lock.  On success ownership of *sb has moved
 * into the bch_dev (see __bch2_dev_attach_bdev()).
 */
static int bch2_dev_attach_bdev(struct bch_fs *c, struct bch_sb_handle *sb)
{
	struct bch_dev *ca;
	int ret;

	lockdep_assert_held(&c->state_lock);

	/* Adopt the newer superblock's fields if this copy is more recent: */
	if (le64_to_cpu(sb->sb->seq) >
	    le64_to_cpu(c->disk_sb.sb->seq))
		bch2_sb_to_fs(c, sb->sb);

	BUG_ON(!bch2_dev_exists(c, sb->sb->dev_idx));

	ca = bch2_dev_locked(c, sb->sb->dev_idx);

	ret = __bch2_dev_attach_bdev(ca, sb);
	if (ret)
		return ret;

	bch2_dev_sysfs_online(c, ca);

	/* Cache the block device name for log messages: */
	struct printbuf name = PRINTBUF;
	prt_bdevname(&name, ca->disk_sb.bdev);

	/* Single-device filesystems are named after their one device: */
	if (c->sb.nr_devices == 1)
		strscpy(c->name, name.buf, sizeof(c->name));
	strscpy(ca->name, name.buf, sizeof(ca->name));

	printbuf_exit(&name);

	rebalance_wakeup(c);
	return 0;
}
|
|
|
|
|
|
|
|
/* Device management: */
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Note: this function is also used by the error paths - when a particular
|
|
|
|
* device sees an error, we call it to determine whether we can just set the
|
|
|
|
* device RO, or - if this function returns false - we'll set the whole
|
|
|
|
* filesystem RO:
|
|
|
|
*
|
|
|
|
* XXX: maybe we should be more explicit about whether we're changing state
|
|
|
|
* because we got an error or what have you?
|
|
|
|
*/
|
|
|
|
/*
 * Note: this function is also used by the error paths - when a particular
 * device sees an error, we call it to determine whether we can just set the
 * device RO, or - if this function returns false - we'll set the whole
 * filesystem RO:
 *
 * XXX: maybe we should be more explicit about whether we're changing state
 * because we got an error or what have you?
 */
bool bch2_dev_state_allowed(struct bch_fs *c, struct bch_dev *ca,
			    enum bch_member_state new_state, int flags)
{
	struct bch_devs_mask new_online_devs;
	int nr_rw = 0, required;

	lockdep_assert_held(&c->state_lock);

	switch (new_state) {
	case BCH_MEMBER_STATE_rw:
		/* Going rw never loses redundancy: */
		return true;
	case BCH_MEMBER_STATE_ro:
		/* Only a transition away from rw reduces write capacity: */
		if (ca->mi.state != BCH_MEMBER_STATE_rw)
			return true;

		/* do we have enough devices to write to? */
		for_each_member_device(c, ca2)
			if (ca2 != ca)
				nr_rw += ca2->mi.state == BCH_MEMBER_STATE_rw;

		/*
		 * Without the force flags we require the configured replica
		 * counts; with them, only the hard minimum:
		 */
		required = max(!(flags & BCH_FORCE_IF_METADATA_DEGRADED)
			       ? c->opts.metadata_replicas
			       : metadata_replicas_required(c),
			       !(flags & BCH_FORCE_IF_DATA_DEGRADED)
			       ? c->opts.data_replicas
			       : data_replicas_required(c));

		return nr_rw >= required;
	case BCH_MEMBER_STATE_failed:
	case BCH_MEMBER_STATE_spare:
		if (ca->mi.state != BCH_MEMBER_STATE_rw &&
		    ca->mi.state != BCH_MEMBER_STATE_ro)
			return true;

		/* do we have enough devices to read from? */
		new_online_devs = bch2_online_devs(c);
		__clear_bit(ca->dev_idx, new_online_devs.d);

		return bch2_have_enough_devs(c, new_online_devs, flags, false);
	default:
		BUG();
	}
}
|
|
|
|
|
|
|
|
/*
 * Decide whether the filesystem can start with the devices currently online.
 *
 * Unless a degraded mount was requested, refuse if any member that is
 * supposed to be usable (rw or ro) is not online; otherwise defer to
 * bch2_have_enough_devs() with force flags derived from the mount options.
 */
static bool bch2_fs_may_start(struct bch_fs *c)
{
	struct bch_dev *ca;
	unsigned i, flags = 0;

	if (c->opts.very_degraded)
		flags |= BCH_FORCE_IF_DEGRADED|BCH_FORCE_IF_LOST;

	if (c->opts.degraded)
		flags |= BCH_FORCE_IF_DEGRADED;

	if (!c->opts.degraded &&
	    !c->opts.very_degraded) {
		/* sb_lock guards the member list while we walk it: */
		mutex_lock(&c->sb_lock);

		for (i = 0; i < c->disk_sb.sb->nr_devices; i++) {
			if (!bch2_member_exists(c->disk_sb.sb, i))
				continue;

			ca = bch2_dev_locked(c, i);

			/* A missing rw/ro member blocks a non-degraded start: */
			if (!bch2_dev_is_online(ca) &&
			    (ca->mi.state == BCH_MEMBER_STATE_rw ||
			     ca->mi.state == BCH_MEMBER_STATE_ro)) {
				mutex_unlock(&c->sb_lock);
				return false;
			}
		}
		mutex_unlock(&c->sb_lock);
	}

	return bch2_have_enough_devs(c, bch2_online_devs(c), flags, true);
}
|
|
|
|
|
|
|
|
/*
 * Transition @ca out of write service: remove it from the allocator,
 * recompute filesystem capacity without it, then stop its journal.
 * The ordering of these calls is deliberate — do not reorder.
 */
static void __bch2_dev_read_only(struct bch_fs *c, struct bch_dev *ca)
{
	/*
	 * The allocator thread itself allocates btree nodes, so stop it first:
	 */
	bch2_dev_allocator_remove(c, ca);
	bch2_recalc_capacity(c);
	bch2_dev_journal_stop(&c->journal, ca);
}
|
|
|
|
|
2022-01-09 18:48:31 -07:00
|
|
|
/*
 * Put @ca into write service: add it to the allocator, recompute capacity,
 * and kick off bucket discards.  The member state must already be rw;
 * caller must hold c->state_lock.
 */
static void __bch2_dev_read_write(struct bch_fs *c, struct bch_dev *ca)
{
	lockdep_assert_held(&c->state_lock);

	BUG_ON(ca->mi.state != BCH_MEMBER_STATE_rw);

	bch2_dev_allocator_add(c, ca);
	bch2_recalc_capacity(c);
	bch2_dev_do_discards(ca);
}
|
|
|
|
|
|
|
|
/*
 * Change the member state of @ca (rw/ro/failed/spare), persisting the new
 * state to the superblock and moving the device in/out of write service as
 * needed.
 *
 * NOTE(review): called with c->state_lock held by bch2_dev_set_state();
 * the read-only/read-write helpers it calls assert or require that lock.
 *
 * Returns 0 on success (including a no-op transition), or
 * -BCH_ERR_device_state_not_allowed if the transition would lose data.
 */
int __bch2_dev_set_state(struct bch_fs *c, struct bch_dev *ca,
			 enum bch_member_state new_state, int flags)
{
	struct bch_member *m;
	int ret = 0;

	if (ca->mi.state == new_state)
		return 0;

	if (!bch2_dev_state_allowed(c, ca, new_state, flags))
		return -BCH_ERR_device_state_not_allowed;

	/* Leaving rw: quiesce allocator/journal before flipping the state: */
	if (new_state != BCH_MEMBER_STATE_rw)
		__bch2_dev_read_only(c, ca);

	bch_notice(ca, "%s", bch2_member_states[new_state]);

	/* Persist the new member state: */
	mutex_lock(&c->sb_lock);
	m = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx);
	SET_BCH_MEMBER_STATE(m, new_state);
	bch2_write_super(c);
	mutex_unlock(&c->sb_lock);

	/* Entering rw: bring the device into write service afterwards: */
	if (new_state == BCH_MEMBER_STATE_rw)
		__bch2_dev_read_write(c, ca);

	rebalance_wakeup(c);

	return ret;
}
|
|
|
|
|
|
|
|
int bch2_dev_set_state(struct bch_fs *c, struct bch_dev *ca,
|
|
|
|
enum bch_member_state new_state, int flags)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
2020-06-15 11:58:47 -07:00
|
|
|
down_write(&c->state_lock);
|
2017-03-16 23:18:50 -07:00
|
|
|
ret = __bch2_dev_set_state(c, ca, new_state, flags);
|
2020-06-15 11:58:47 -07:00
|
|
|
up_write(&c->state_lock);
|
2017-03-16 23:18:50 -07:00
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Device add/removal: */
|
|
|
|
|
|
|
|
/*
 * Permanently remove @ca from the filesystem.
 *
 * Order matters: the device is first made read-only, all data and allocation
 * info referencing it is dropped, the journal is flushed so no keys point at
 * it, and only then is its member slot freed in the superblock.
 *
 * Consumes the caller's ref on ca->ref whether we succeed or fail.  On
 * failure the device is returned to rw service if it was rw before.
 */
int bch2_dev_remove(struct bch_fs *c, struct bch_dev *ca, int flags)
{
	struct bch_member *m;
	unsigned dev_idx = ca->dev_idx, data;
	int ret;

	down_write(&c->state_lock);

	/*
	 * We consume a reference to ca->ref, regardless of whether we succeed
	 * or fail:
	 */
	bch2_dev_put(ca);

	if (!bch2_dev_state_allowed(c, ca, BCH_MEMBER_STATE_failed, flags)) {
		bch_err(ca, "Cannot remove without losing data");
		ret = -BCH_ERR_device_state_not_allowed;
		goto err;
	}

	__bch2_dev_read_only(c, ca);

	/* Migrate/drop all data living on this device: */
	ret = bch2_dev_data_drop(c, ca->dev_idx, flags);
	bch_err_msg(ca, ret, "bch2_dev_data_drop()");
	if (ret)
		goto err;

	ret = bch2_dev_remove_alloc(c, ca);
	bch_err_msg(ca, ret, "bch2_dev_remove_alloc()");
	if (ret)
		goto err;

	/*
	 * We need to flush the entire journal to get rid of keys that reference
	 * the device being removed before removing the superblock entry
	 */
	bch2_journal_flush_all_pins(&c->journal);

	/*
	 * this is really just needed for the bch2_replicas_gc_(start|end)
	 * calls, and could be cleaned up:
	 */
	ret = bch2_journal_flush_device_pins(&c->journal, ca->dev_idx);
	bch_err_msg(ca, ret, "bch2_journal_flush_device_pins()");
	if (ret)
		goto err;

	ret = bch2_journal_flush(&c->journal);
	bch_err_msg(ca, ret, "bch2_journal_flush()");
	if (ret)
		goto err;

	ret = bch2_replicas_gc2(c);
	bch_err_msg(ca, ret, "bch2_replicas_gc2()");
	if (ret)
		goto err;

	/* Bail out if anything still references this device: */
	data = bch2_dev_has_data(c, ca);
	if (data) {
		struct printbuf data_has = PRINTBUF;

		prt_bitflags(&data_has, __bch2_data_types, data);
		bch_err(ca, "Remove failed, still has data (%s)", data_has.buf);
		printbuf_exit(&data_has);
		ret = -EBUSY;
		goto err;
	}

	__bch2_dev_offline(c, ca);

	mutex_lock(&c->sb_lock);
	rcu_assign_pointer(c->devs[ca->dev_idx], NULL);
	mutex_unlock(&c->sb_lock);

	/*
	 * Drop the last ref; debug builds route through bch2_dev_put() so
	 * refcount debugging sees the transition:
	 */
#ifndef CONFIG_BCACHEFS_DEBUG
	percpu_ref_kill(&ca->ref);
#else
	ca->dying = true;
	bch2_dev_put(ca);
#endif
	wait_for_completion(&ca->ref_completion);

	bch2_dev_free(ca);

	/*
	 * Free this device's slot in the bch_member array - all pointers to
	 * this device must be gone:
	 */
	mutex_lock(&c->sb_lock);
	m = bch2_members_v2_get_mut(c->disk_sb.sb, dev_idx);
	memset(&m->uuid, 0, sizeof(m->uuid));

	bch2_write_super(c);

	mutex_unlock(&c->sb_lock);
	up_write(&c->state_lock);
	return 0;
err:
	/* Undo the read-only transition if the device was in service: */
	if (ca->mi.state == BCH_MEMBER_STATE_rw &&
	    !percpu_ref_is_zero(&ca->io_ref))
		__bch2_dev_read_write(c, ca);
	up_write(&c->state_lock);
	return ret;
}
|
|
|
|
|
|
|
|
/* Add new device to running filesystem: */
|
|
|
|
int bch2_dev_add(struct bch_fs *c, const char *path)
|
|
|
|
{
|
|
|
|
struct bch_opts opts = bch2_opts_empty();
|
|
|
|
struct bch_sb_handle sb;
|
|
|
|
struct bch_dev *ca = NULL;
|
2022-02-25 11:18:19 -07:00
|
|
|
struct printbuf errbuf = PRINTBUF;
|
2022-08-18 14:57:24 -07:00
|
|
|
struct printbuf label = PRINTBUF;
|
2017-03-16 23:18:50 -07:00
|
|
|
int ret;
|
|
|
|
|
|
|
|
ret = bch2_read_super(path, &opts, &sb);
|
2023-12-16 20:43:41 -07:00
|
|
|
bch_err_msg(c, ret, "reading super");
|
|
|
|
if (ret)
|
2022-01-03 21:38:50 -07:00
|
|
|
goto err;
|
2017-03-16 23:18:50 -07:00
|
|
|
|
2024-09-01 15:08:25 -07:00
|
|
|
struct bch_member dev_mi = bch2_sb_member_get(sb.sb, sb.sb->dev_idx);
|
2017-03-16 23:18:50 -07:00
|
|
|
|
2022-08-18 14:57:24 -07:00
|
|
|
if (BCH_MEMBER_GROUP(&dev_mi)) {
|
2023-10-22 08:12:14 -07:00
|
|
|
bch2_disk_path_to_text_sb(&label, sb.sb, BCH_MEMBER_GROUP(&dev_mi) - 1);
|
2022-08-18 14:57:24 -07:00
|
|
|
if (label.allocation_failure) {
|
|
|
|
ret = -ENOMEM;
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-11-19 20:39:08 -07:00
|
|
|
ret = bch2_dev_may_add(sb.sb, c);
|
2023-12-16 20:43:41 -07:00
|
|
|
if (ret)
|
2022-01-03 21:38:50 -07:00
|
|
|
goto err;
|
2017-03-16 23:18:50 -07:00
|
|
|
|
|
|
|
ca = __bch2_dev_alloc(c, &dev_mi);
|
|
|
|
if (!ca) {
|
2022-01-03 21:38:50 -07:00
|
|
|
ret = -ENOMEM;
|
|
|
|
goto err;
|
2017-03-16 23:18:50 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
ret = __bch2_dev_attach_bdev(ca, &sb);
|
2023-09-13 09:44:08 -07:00
|
|
|
if (ret)
|
2022-01-03 21:38:50 -07:00
|
|
|
goto err;
|
2017-03-16 23:18:50 -07:00
|
|
|
|
2024-06-28 16:47:31 -07:00
|
|
|
ret = bch2_dev_journal_alloc(ca, true);
|
2023-12-16 20:43:41 -07:00
|
|
|
bch_err_msg(c, ret, "allocating journal");
|
|
|
|
if (ret)
|
2017-03-16 23:18:50 -07:00
|
|
|
goto err;
|
|
|
|
|
2020-06-15 11:58:47 -07:00
|
|
|
down_write(&c->state_lock);
|
2017-03-16 23:18:50 -07:00
|
|
|
mutex_lock(&c->sb_lock);
|
|
|
|
|
|
|
|
ret = bch2_sb_from_fs(c, ca);
|
2023-12-16 20:43:41 -07:00
|
|
|
bch_err_msg(c, ret, "setting up new superblock");
|
|
|
|
if (ret)
|
2017-03-16 23:18:50 -07:00
|
|
|
goto err_unlock;
|
|
|
|
|
|
|
|
if (dynamic_fault("bcachefs:add:no_slot"))
|
2024-09-01 15:08:25 -07:00
|
|
|
goto err_unlock;
|
2017-03-16 23:18:50 -07:00
|
|
|
|
2024-09-01 15:08:25 -07:00
|
|
|
ret = bch2_sb_member_alloc(c);
|
|
|
|
if (ret < 0) {
|
2023-09-10 22:37:34 -07:00
|
|
|
bch_err_msg(c, ret, "setting up new superblock");
|
2017-03-16 23:18:50 -07:00
|
|
|
goto err_unlock;
|
2021-12-28 14:31:57 -07:00
|
|
|
}
|
2024-09-01 15:08:25 -07:00
|
|
|
unsigned dev_idx = ret;
|
2017-03-16 23:18:50 -07:00
|
|
|
|
|
|
|
/* success: */
|
|
|
|
|
2024-09-01 15:08:25 -07:00
|
|
|
dev_mi.last_mount = cpu_to_le64(ktime_get_real_seconds());
|
|
|
|
*bch2_members_v2_get_mut(c->disk_sb.sb, dev_idx) = dev_mi;
|
2017-03-16 23:18:50 -07:00
|
|
|
|
|
|
|
ca->disk_sb.sb->dev_idx = dev_idx;
|
|
|
|
bch2_dev_attach(c, ca, dev_idx);
|
|
|
|
|
2022-08-18 14:57:24 -07:00
|
|
|
if (BCH_MEMBER_GROUP(&dev_mi)) {
|
|
|
|
ret = __bch2_dev_group_set(c, ca, label.buf);
|
2023-12-16 20:43:41 -07:00
|
|
|
bch_err_msg(c, ret, "creating new label");
|
|
|
|
if (ret)
|
2022-08-18 14:57:24 -07:00
|
|
|
goto err_unlock;
|
|
|
|
}
|
|
|
|
|
2017-03-16 23:18:50 -07:00
|
|
|
bch2_write_super(c);
|
|
|
|
mutex_unlock(&c->sb_lock);
|
|
|
|
|
2024-02-11 20:48:05 -07:00
|
|
|
ret = bch2_dev_usage_init(ca, false);
|
2023-11-09 12:22:46 -07:00
|
|
|
if (ret)
|
|
|
|
goto err_late;
|
|
|
|
|
2024-04-07 15:05:34 -07:00
|
|
|
ret = bch2_trans_mark_dev_sb(c, ca, BTREE_TRIGGER_transactional);
|
2023-12-16 20:43:41 -07:00
|
|
|
bch_err_msg(ca, ret, "marking new superblock");
|
|
|
|
if (ret)
|
2021-01-22 15:56:34 -07:00
|
|
|
goto err_late;
|
2020-10-16 18:36:26 -07:00
|
|
|
|
2021-12-11 15:13:09 -07:00
|
|
|
ret = bch2_fs_freespace_init(c);
|
2023-12-16 20:43:41 -07:00
|
|
|
bch_err_msg(ca, ret, "initializing free space");
|
|
|
|
if (ret)
|
2021-12-11 15:13:09 -07:00
|
|
|
goto err_late;
|
|
|
|
|
2021-12-24 02:22:20 -07:00
|
|
|
ca->new_fs_bucket_idx = 0;
|
|
|
|
|
2022-01-09 18:48:31 -07:00
|
|
|
if (ca->mi.state == BCH_MEMBER_STATE_rw)
|
|
|
|
__bch2_dev_read_write(c, ca);
|
2017-03-16 23:18:50 -07:00
|
|
|
|
2020-06-15 11:58:47 -07:00
|
|
|
up_write(&c->state_lock);
|
2017-03-16 23:18:50 -07:00
|
|
|
return 0;
|
|
|
|
|
|
|
|
err_unlock:
|
|
|
|
mutex_unlock(&c->sb_lock);
|
2020-06-15 11:58:47 -07:00
|
|
|
up_write(&c->state_lock);
|
2017-03-16 23:18:50 -07:00
|
|
|
err:
|
|
|
|
if (ca)
|
|
|
|
bch2_dev_free(ca);
|
|
|
|
bch2_free_super(&sb);
|
2022-08-18 14:57:24 -07:00
|
|
|
printbuf_exit(&label);
|
2022-02-25 11:18:19 -07:00
|
|
|
printbuf_exit(&errbuf);
|
2023-12-16 20:43:41 -07:00
|
|
|
bch_err_fn(c, ret);
|
2017-03-16 23:18:50 -07:00
|
|
|
return ret;
|
|
|
|
err_late:
|
2021-01-22 15:56:34 -07:00
|
|
|
up_write(&c->state_lock);
|
2022-01-03 21:38:50 -07:00
|
|
|
ca = NULL;
|
|
|
|
goto err;
|
2017-03-16 23:18:50 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Hot add existing device to running filesystem: */
|
|
|
|
/* Hot add existing device to running filesystem: */
/*
 * Re-attach a member device (already present in the superblock member list)
 * whose block device has just become available: verify it belongs to this
 * filesystem, attach the bdev, and bring it back into service; lazily
 * initializes free space and a journal if this device never had them.
 */
int bch2_dev_online(struct bch_fs *c, const char *path)
{
	struct bch_opts opts = bch2_opts_empty();
	struct bch_sb_handle sb = { NULL };
	struct bch_dev *ca;
	unsigned dev_idx;
	int ret;

	down_write(&c->state_lock);

	ret = bch2_read_super(path, &opts, &sb);
	if (ret) {
		up_write(&c->state_lock);
		return ret;
	}

	dev_idx = sb.sb->dev_idx;

	/* Reject foreign, stale, or split-brain superblocks: */
	ret = bch2_dev_in_fs(&c->disk_sb, &sb, &c->opts);
	bch_err_msg(c, ret, "bringing %s online", path);
	if (ret)
		goto err;

	ret = bch2_dev_attach_bdev(c, &sb);
	if (ret)
		goto err;

	ca = bch2_dev_locked(c, dev_idx);

	ret = bch2_trans_mark_dev_sb(c, ca, BTREE_TRIGGER_transactional);
	bch_err_msg(c, ret, "bringing %s online: error from bch2_trans_mark_dev_sb", path);
	if (ret)
		goto err;

	if (ca->mi.state == BCH_MEMBER_STATE_rw)
		__bch2_dev_read_write(c, ca);

	/* First time online since format: build the freespace btree entries: */
	if (!ca->mi.freespace_initialized) {
		ret = bch2_dev_freespace_init(c, ca, 0, ca->mi.nbuckets);
		bch_err_msg(ca, ret, "initializing free space");
		if (ret)
			goto err;
	}

	if (!ca->journal.nr) {
		ret = bch2_dev_journal_alloc(ca, false);
		bch_err_msg(ca, ret, "allocating journal");
		if (ret)
			goto err;
	}

	/* Record the mount time in the member entry: */
	mutex_lock(&c->sb_lock);
	bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx)->last_mount =
		cpu_to_le64(ktime_get_real_seconds());
	bch2_write_super(c);
	mutex_unlock(&c->sb_lock);

	up_write(&c->state_lock);
	return 0;
err:
	up_write(&c->state_lock);
	bch2_free_super(&sb);
	return ret;
}
|
|
|
|
|
|
|
|
int bch2_dev_offline(struct bch_fs *c, struct bch_dev *ca, int flags)
|
|
|
|
{
|
2020-06-15 11:58:47 -07:00
|
|
|
down_write(&c->state_lock);
|
2017-03-16 23:18:50 -07:00
|
|
|
|
|
|
|
if (!bch2_dev_is_online(ca)) {
|
|
|
|
bch_err(ca, "Already offline");
|
2020-06-15 11:58:47 -07:00
|
|
|
up_write(&c->state_lock);
|
2017-03-16 23:18:50 -07:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2021-02-20 17:47:58 -07:00
|
|
|
if (!bch2_dev_state_allowed(c, ca, BCH_MEMBER_STATE_failed, flags)) {
|
2017-03-16 23:18:50 -07:00
|
|
|
bch_err(ca, "Cannot offline required disk");
|
2020-06-15 11:58:47 -07:00
|
|
|
up_write(&c->state_lock);
|
2022-11-19 20:39:08 -07:00
|
|
|
return -BCH_ERR_device_state_not_allowed;
|
2017-03-16 23:18:50 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
__bch2_dev_offline(c, ca);
|
|
|
|
|
2020-06-15 11:58:47 -07:00
|
|
|
up_write(&c->state_lock);
|
2017-03-16 23:18:50 -07:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Grow @ca to @nbuckets buckets (shrinking is not supported): resize the
 * in-memory bucket arrays, persist the new size in the member entry, and —
 * if the freespace btree already exists for this device — account for and
 * initialize the newly added buckets.
 */
int bch2_dev_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
{
	struct bch_member *m;
	u64 old_nbuckets;
	int ret = 0;

	down_write(&c->state_lock);
	old_nbuckets = ca->mi.nbuckets;

	if (nbuckets < ca->mi.nbuckets) {
		bch_err(ca, "Cannot shrink yet");
		ret = -EINVAL;
		goto err;
	}

	if (nbuckets > BCH_MEMBER_NBUCKETS_MAX) {
		bch_err(ca, "New device size too big (%llu greater than max %u)",
			nbuckets, BCH_MEMBER_NBUCKETS_MAX);
		ret = -BCH_ERR_device_size_too_big;
		goto err;
	}

	/* The underlying block device must actually be that large: */
	if (bch2_dev_is_online(ca) &&
	    get_capacity(ca->disk_sb.bdev->bd_disk) <
	    ca->mi.bucket_size * nbuckets) {
		bch_err(ca, "New size larger than device");
		ret = -BCH_ERR_device_size_too_small;
		goto err;
	}

	ret = bch2_dev_buckets_resize(c, ca, nbuckets);
	bch_err_msg(ca, ret, "resizing buckets");
	if (ret)
		goto err;

	ret = bch2_trans_mark_dev_sb(c, ca, BTREE_TRIGGER_transactional);
	if (ret)
		goto err;

	/* Persist the new bucket count: */
	mutex_lock(&c->sb_lock);
	m = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx);
	m->nbuckets = cpu_to_le64(nbuckets);

	bch2_write_super(c);
	mutex_unlock(&c->sb_lock);

	if (ca->mi.freespace_initialized) {
		/* Credit the new buckets to this device's free-bucket count: */
		struct disk_accounting_pos acc = {
			.type = BCH_DISK_ACCOUNTING_dev_data_type,
			.dev_data_type.dev = ca->dev_idx,
			.dev_data_type.data_type = BCH_DATA_free,
		};
		u64 v[3] = { nbuckets - old_nbuckets, 0, 0 };

		ret = bch2_trans_commit_do(ca->fs, NULL, NULL, 0,
				bch2_disk_accounting_mod(trans, &acc, v, ARRAY_SIZE(v), false)) ?:
			bch2_dev_freespace_init(c, ca, old_nbuckets, nbuckets);
		if (ret)
			goto err;
	}

	bch2_recalc_capacity(c);
err:
	up_write(&c->state_lock);
	return ret;
}
|
|
|
|
|
|
|
|
/* return with ref on ca->ref: */
|
2022-02-16 04:23:06 -07:00
|
|
|
struct bch_dev *bch2_dev_lookup(struct bch_fs *c, const char *name)
|
2017-03-16 23:18:50 -07:00
|
|
|
{
|
2024-05-25 12:36:19 -07:00
|
|
|
if (!strncmp(name, "/dev/", strlen("/dev/")))
|
|
|
|
name += strlen("/dev/");
|
|
|
|
|
2024-05-04 09:51:49 -07:00
|
|
|
for_each_member_device(c, ca)
|
|
|
|
if (!strcmp(name, ca->name))
|
2023-12-17 00:34:05 -07:00
|
|
|
return ca;
|
|
|
|
return ERR_PTR(-BCH_ERR_ENOENT_dev_not_found);
|
2017-03-16 23:18:50 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Filesystem open: */
|
|
|
|
|
2023-06-27 18:02:27 -07:00
|
|
|
static inline int sb_cmp(struct bch_sb *l, struct bch_sb *r)
|
|
|
|
{
|
|
|
|
return cmp_int(le64_to_cpu(l->seq), le64_to_cpu(r->seq)) ?:
|
|
|
|
cmp_int(le64_to_cpu(l->write_time), le64_to_cpu(r->write_time));
|
|
|
|
}
|
|
|
|
|
2017-03-16 23:18:50 -07:00
|
|
|
/*
 * Open a filesystem from @nr_devices device paths: read every superblock,
 * pick the most recent one as authoritative, prune removed/split-brain
 * members, allocate the struct bch_fs, attach all devices, and (unless
 * opts.nostart) start the filesystem.
 *
 * Returns the filesystem or an ERR_PTR; all superblock handles are freed
 * before returning (ownership moves into the bch_devs on attach).
 */
struct bch_fs *bch2_fs_open(char * const *devices, unsigned nr_devices,
			    struct bch_opts opts)
{
	DARRAY(struct bch_sb_handle) sbs = { 0 };
	struct bch_fs *c = NULL;
	struct bch_sb_handle *best = NULL;
	struct printbuf errbuf = PRINTBUF;
	int ret = 0;

	/* Pin the module while a filesystem instance exists: */
	if (!try_module_get(THIS_MODULE))
		return ERR_PTR(-ENODEV);

	if (!nr_devices) {
		ret = -EINVAL;
		goto err;
	}

	ret = darray_make_room(&sbs, nr_devices);
	if (ret)
		goto err;

	for (unsigned i = 0; i < nr_devices; i++) {
		struct bch_sb_handle sb = { NULL };

		ret = bch2_read_super(devices[i], &opts, &sb);
		if (ret)
			goto err;

		/* Cannot fail: room was reserved by darray_make_room() above */
		BUG_ON(darray_push(&sbs, sb));
	}

	if (opts.nochanges && !opts.read_only) {
		ret = -BCH_ERR_erofs_nochanges;
		goto err_print;
	}

	/* The most recently written superblock is authoritative: */
	darray_for_each(sbs, sb)
		if (!best || sb_cmp(sb->sb, best->sb) > 0)
			best = sb;

	/*
	 * Drop devices the authoritative superblock says were removed or
	 * that have diverged (split brain); iterate in reverse so removal
	 * doesn't disturb unvisited entries:
	 */
	darray_for_each_reverse(sbs, sb) {
		ret = bch2_dev_in_fs(best, sb, &opts);

		if (ret == -BCH_ERR_device_has_been_removed ||
		    ret == -BCH_ERR_device_splitbrain) {
			bch2_free_super(sb);
			darray_remove_item(&sbs, sb);
			/* keep @best pointing at the same element after the shift: */
			best -= best > sb;
			ret = 0;
			continue;
		}

		if (ret)
			goto err_print;
	}

	c = bch2_fs_alloc(best->sb, opts);
	ret = PTR_ERR_OR_ZERO(c);
	if (ret)
		goto err;

	down_write(&c->state_lock);
	darray_for_each(sbs, sb) {
		ret = bch2_dev_attach_bdev(c, sb);
		if (ret) {
			up_write(&c->state_lock);
			goto err;
		}
	}
	up_write(&c->state_lock);

	if (!bch2_fs_may_start(c)) {
		ret = -BCH_ERR_insufficient_devices_to_start;
		goto err_print;
	}

	if (!c->opts.nostart) {
		ret = bch2_fs_start(c);
		if (ret)
			goto err;
	}
out:
	darray_for_each(sbs, sb)
		bch2_free_super(sb);
	darray_exit(&sbs);
	printbuf_exit(&errbuf);
	module_put(THIS_MODULE);
	return c;
err_print:
	pr_err("bch_fs_open err opening %s: %s",
	       devices[0], bch2_err_str(ret));
err:
	if (!IS_ERR_OR_NULL(c))
		bch2_fs_stop(c);
	c = ERR_PTR(ret);
	goto out;
}
|
|
|
|
|
|
|
|
/* Global interfaces/init */
|
|
|
|
|
|
|
|
/*
 * Module teardown: unwinds bcachefs_init() in reverse registration order.
 */
static void bcachefs_exit(void)
{
	bch2_debug_exit();
	bch2_vfs_exit();
	bch2_chardev_exit();
	bch2_btree_key_cache_exit();
	if (bcachefs_kset)
		kset_unregister(bcachefs_kset);
}
|
|
|
|
|
|
|
|
/*
 * Module init: register the sysfs kset and global subsystems.  On any
 * failure, bcachefs_exit() tears down whatever was registered (each *_exit
 * is assumed safe to call for an un-initialized subsystem — verify when
 * adding new entries).
 */
static int __init bcachefs_init(void)
{
	bch2_bkey_pack_test();

	if (!(bcachefs_kset = kset_create_and_add("bcachefs", NULL, fs_kobj)) ||
	    bch2_btree_key_cache_init() ||
	    bch2_chardev_init() ||
	    bch2_vfs_init() ||
	    bch2_debug_init())
		goto err;

	return 0;
err:
	bcachefs_exit();
	return -ENOMEM;
}
|
|
|
|
|
|
|
|
/*
 * Expand BCH_DEBUG_PARAMS() into one writable (0644) bool module parameter
 * per debug knob, each backed by a global "bch2_<name>" flag.
 */
#define BCH_DEBUG_PARAM(name, description)			\
	bool bch2_##name;					\
	module_param_named(name, bch2_##name, bool, 0644);	\
	MODULE_PARM_DESC(name, description);
BCH_DEBUG_PARAMS()
#undef BCH_DEBUG_PARAM
|
|
|
|
|
2023-08-06 07:04:37 -07:00
|
|
|
/* Read-only (0400) module parameter reporting the current metadata version. */
__maybe_unused
static unsigned bch2_metadata_version = bcachefs_metadata_version_current;
module_param_named(version, bch2_metadata_version, uint, 0400);

module_exit(bcachefs_exit);
module_init(bcachefs_init);
|