bcachefs: Assorted checkpatch fixes
checkpatch.pl gives lots of warnings that we don't want - suggested
ignore list:

  ASSIGN_IN_IF
  UNSPECIFIED_INT	- bcachefs coding style prefers single token type names
  NEW_TYPEDEFS		- typedefs are occasionally good
  FUNCTION_ARGUMENTS	- we prefer to look at functions in .c files
			  (hopefully with docbook documentation), not
			  .h file prototypes
  MULTISTATEMENT_MACRO_USE_DO_WHILE
			- we have _many_ x-macros and other macros where
			  we can't do this

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
commit 3e3e02e6bc
parent ed80c5699a
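For anyone reproducing the reduced warning set locally: checkpatch takes a comma-separated suppression list via its --ignore option and also reads default flags from a .checkpatch.conf at the top of the tree, so the list above can be used as, e.g., `./scripts/checkpatch.pl --ignore ASSIGN_IN_IF,UNSPECIFIED_INT,NEW_TYPEDEFS,FUNCTION_ARGUMENTS,MULTISTATEMENT_MACRO_USE_DO_WHILE <patch>` (the invocation is a sketch; check `scripts/checkpatch.pl --help` for the exact spelling).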
@@ -173,7 +173,7 @@ bch2_acl_to_xattr(struct btree_trans *trans,
 	bkey_xattr_init(&xattr->k_i);
 	xattr->k.u64s = u64s;
 	xattr->v.x_type = acl_to_xattr_type(type);
-	xattr->v.x_name_len = 0,
+	xattr->v.x_name_len = 0;
 	xattr->v.x_val_len = cpu_to_le16(acl_len);

 	acl_header = xattr_val(&xattr->v);
@@ -490,7 +490,7 @@ again:
  * bch_bucket_alloc - allocate a single bucket from a specific device
  *
  * Returns index of bucket on success, 0 on failure
- * */
+ */
 static struct open_bucket *bch2_bucket_alloc_trans(struct btree_trans *trans,
 						   struct bch_dev *ca,
 						   enum alloc_reserve reserve,
@@ -107,7 +107,7 @@
  *
  * BTREE NODES:
  *
- * Our unit of allocation is a bucket, and we we can't arbitrarily allocate and
+ * Our unit of allocation is a bucket, and we can't arbitrarily allocate and
  * free smaller than a bucket - so, that's how big our btree nodes are.
  *
  * (If buckets are really big we'll only use part of the bucket for a btree node
@@ -340,7 +340,7 @@ static inline void bkey_init(struct bkey *k)
  * number.
  *
  * - WHITEOUT: for hash table btrees
-*/
+ */
 #define BCH_BKEY_TYPES() \
 	x(deleted, 0) \
 	x(whiteout, 1) \
@@ -783,16 +783,16 @@ enum {
  * User flags (get/settable with FS_IOC_*FLAGS, correspond to FS_*_FL
  * flags)
  */
-	__BCH_INODE_SYNC = 0,
-	__BCH_INODE_IMMUTABLE = 1,
-	__BCH_INODE_APPEND = 2,
-	__BCH_INODE_NODUMP = 3,
-	__BCH_INODE_NOATIME = 4,
+	__BCH_INODE_SYNC		= 0,
+	__BCH_INODE_IMMUTABLE		= 1,
+	__BCH_INODE_APPEND		= 2,
+	__BCH_INODE_NODUMP		= 3,
+	__BCH_INODE_NOATIME		= 4,

-	__BCH_INODE_I_SIZE_DIRTY= 5,
-	__BCH_INODE_I_SECTORS_DIRTY= 6,
-	__BCH_INODE_UNLINKED = 7,
-	__BCH_INODE_BACKPTR_UNTRUSTED = 8,
+	__BCH_INODE_I_SIZE_DIRTY	= 5,
+	__BCH_INODE_I_SECTORS_DIRTY	= 6,
+	__BCH_INODE_UNLINKED		= 7,
+	__BCH_INODE_BACKPTR_UNTRUSTED	= 8,

 	/* bits 20+ reserved for packed fields below: */
 };
@@ -1113,10 +1113,10 @@ int bch2_bkey_cmp_packed(const struct btree *b,

 	if (bkey_packed(l)) {
 		__bkey_unpack_key_format_checked(b, &unpacked, l);
-		l = (void*) &unpacked;
+		l = (void *) &unpacked;
 	} else if (bkey_packed(r)) {
 		__bkey_unpack_key_format_checked(b, &unpacked, r);
-		r = (void*) &unpacked;
+		r = (void *) &unpacked;
 	}

 	return bpos_cmp(((struct bkey *) l)->p, ((struct bkey *) r)->p);
@@ -142,8 +142,9 @@ int bkey_cmp_left_packed(const struct btree *b,
 }

 /*
- * we prefer to pass bpos by ref, but it's often enough terribly convenient to
- * pass it by by val... as much as I hate c++, const ref would be nice here:
+ * The compiler generates better code when we pass bpos by ref, but it's often
+ * enough terribly convenient to pass it by val... as much as I hate c++, const
+ * ref would be nice here:
 */
 __pure __flatten
 static inline int bkey_cmp_left_packed_byval(const struct btree *b,
@@ -953,7 +953,7 @@ static void bch2_bset_fix_lookup_table(struct btree *b,
 	t->size -= j - l;

 	for (j = l; j < t->size; j++)
-	rw_aux_tree(b, t)[j].offset += shift;
+		rw_aux_tree(b, t)[j].offset += shift;

 	EBUG_ON(l < t->size &&
 		rw_aux_tree(b, t)[l].offset ==
@@ -1254,7 +1254,7 @@ void bch2_btree_node_iter_push(struct btree_node_iter *iter,
 	bch2_btree_node_iter_sort(iter, b);
 }

-noinline __flatten __attribute__((cold))
+noinline __flatten __cold
 static void btree_node_iter_init_pack_failed(struct btree_node_iter *iter,
 					     struct btree *b, struct bpos *search)
 {
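A note on the attribute change above: `__cold` is the kernel's portable shorthand for the GCC/Clang cold attribute, so the new spelling is equivalent but matches house style. Roughly, from include/linux/compiler_attributes.h:

	/* paraphrased; the kernel wrapper expands to the same attribute */
	#define __cold	__attribute__((__cold__))

Marking this pack-failed slowpath cold lets the compiler move it out of the hot text section and deprioritize branches leading to it.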
@@ -819,7 +819,7 @@ struct btree *bch2_btree_node_get(struct btree_trans *trans, struct btree_path *
 	if (likely(c->opts.btree_node_mem_ptr_optimization &&
 		   b &&
 		   b->hash_val == btree_ptr_hash_val(k)))
-			goto lock_node;
+		goto lock_node;
 retry:
 	b = btree_cache_find(bc, k);
 	if (unlikely(!b)) {
@@ -1059,7 +1059,7 @@ wait_on_io:

 	/* XXX we're called from btree_gc which will be holding other btree
 	 * nodes locked
-	 * */
+	 */
 	__bch2_btree_node_wait_on_read(b);
 	__bch2_btree_node_wait_on_write(b);

@@ -318,7 +318,7 @@ static int btree_repair_node_boundaries(struct bch_fs *c, struct btree *b,
 			"  node %s",
 			bch2_btree_ids[b->c.btree_id], b->c.level,
 			buf1.buf, buf2.buf))
-		ret = set_node_min(c, cur, expected_start);
+			ret = set_node_min(c, cur, expected_start);
 	}
out:
fsck_err:
@@ -2832,7 +2832,7 @@ static void bch2_trans_alloc_paths(struct btree_trans *trans, struct bch_fs *c)
 	BUG_ON(trans->used_mempool);

 #ifdef __KERNEL__
-	p = this_cpu_xchg(c->btree_paths_bufs->path , NULL);
+	p = this_cpu_xchg(c->btree_paths_bufs->path, NULL);
 #endif
 	if (!p)
 		p = mempool_alloc(&trans->c->btree_paths_pool, GFP_NOFS);
@@ -3002,7 +3002,7 @@ bch2_btree_bkey_cached_common_to_text(struct printbuf *out,

 	rcu_read_lock();
 	owner = READ_ONCE(b->lock.owner);
-	pid = owner ? owner->pid : 0;;
+	pid = owner ? owner->pid : 0;
 	rcu_read_unlock();

 	prt_tab(out);
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0

 #include "bcachefs.h"
 #include "btree_cache.h"
@@ -315,7 +316,7 @@ btree_key_cache_create(struct btree_trans *trans, struct btree_path *path)
 	bool was_new = true;

 	ck = bkey_cached_alloc(trans, path);
-	if (unlikely(IS_ERR(ck)))
+	if (IS_ERR(ck))
 		return ck;

 	if (unlikely(!ck)) {
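On dropping unlikely() around IS_ERR() here (and in bch2_symlink() further down): IS_ERR() already contains a branch hint internally, so the outer wrapper added nothing. From include/linux/err.h, approximately:

	#define IS_ERR_VALUE(x) unlikely((unsigned long)(void *)(x) >= (unsigned long)-MAX_ERRNO)

	static inline bool __must_check IS_ERR(const void *ptr)
	{
		return IS_ERR_VALUE((unsigned long)ptr);
	}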
@@ -435,7 +436,7 @@ err:
 	return ret;
 }

-noinline static int
+static noinline int
 bch2_btree_path_traverse_cached_slowpath(struct btree_trans *trans, struct btree_path *path,
 					 unsigned flags)
 {
@@ -616,7 +617,7 @@ static int btree_key_cache_flush_pos(struct btree_trans *trans,
 	 * Since journal reclaim depends on us making progress here, and the
 	 * allocator/copygc depend on journal reclaim making progress, we need
 	 * to be using alloc reserves:
-	 * */
+	 */
 	ret = bch2_btree_iter_traverse(&b_iter) ?:
 		bch2_trans_update(trans, &b_iter, ck->k,
 				  BTREE_UPDATE_KEY_CACHE_RECLAIM|
@@ -1019,8 +1020,7 @@ void bch2_btree_key_cache_to_text(struct printbuf *out, struct btree_key_cache *

 void bch2_btree_key_cache_exit(void)
 {
-	if (bch2_key_cache)
-		kmem_cache_destroy(bch2_key_cache);
+	kmem_cache_destroy(bch2_key_cache);
 }

 int __init bch2_btree_key_cache_init(void)
@@ -681,7 +681,7 @@ int bch2_trans_relock(struct btree_trans *trans)
 	struct btree_path *path;

 	if (unlikely(trans->restarted))
-		return - ((int) trans->restarted);
+		return -((int) trans->restarted);

 	trans_for_each_path(trans, path)
 		if (path->should_be_locked &&
@@ -2047,7 +2047,7 @@ static int async_btree_node_rewrite_trans(struct btree_trans *trans,
 		goto out;

 	ret = bch2_btree_node_rewrite(trans, &iter, b, 0);
-out :
+out:
 	bch2_trans_iter_exit(trans, &iter);

 	return ret;
@@ -933,7 +933,7 @@ int bch2_mark_extent(struct btree_trans *trans,
 {
 	u64 journal_seq = trans->journal_res.seq;
 	struct bch_fs *c = trans->c;
-	struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE ? old: new;
+	struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE ? old : new;
 	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
 	const union bch_extent_entry *entry;
 	struct extent_ptr_decoded p;
@@ -1152,7 +1152,7 @@ int bch2_mark_reservation(struct btree_trans *trans,
 			  unsigned flags)
 {
 	struct bch_fs *c = trans->c;
-	struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE ? old: new;
+	struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE ? old : new;
 	struct bch_fs_usage __percpu *fs_usage;
 	unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas;
 	s64 sectors = (s64) k.k->size;
@@ -1231,7 +1231,7 @@ int bch2_mark_reflink_p(struct btree_trans *trans,
 			unsigned flags)
 {
 	struct bch_fs *c = trans->c;
-	struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE ? old: new;
+	struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE ? old : new;
 	struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k);
 	struct reflink_gc *ref;
 	size_t l, r, m;
@@ -2102,5 +2102,5 @@ int bch2_dev_buckets_alloc(struct bch_fs *c, struct bch_dev *ca)
 		return -ENOMEM;
 	}

-	return bch2_dev_buckets_resize(c, ca, ca->mi.nbuckets);;
+	return bch2_dev_buckets_resize(c, ca, ca->mi.nbuckets);
 }
@@ -131,7 +131,7 @@ static inline int do_encrypt(struct crypto_sync_skcipher *tfm,
 		size_t orig_len = len;
 		int ret, i;

-		sg = kmalloc_array(sizeof(*sg), pages, GFP_KERNEL);
+		sg = kmalloc_array(pages, sizeof(*sg), GFP_KERNEL);
 		if (!sg)
 			return -ENOMEM;

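The allocation above is the same size either way, since the two arguments are only multiplied together; the fix is about the documented signature, which takes the element count first, and about checkpatch's expectation that sizeof() is the second argument. A minimal sketch of the conventional form (`nents` is a hypothetical count):

	struct scatterlist *sg;

	/* kmalloc_array(n, size, flags): returns NULL on n * size overflow or OOM */
	sg = kmalloc_array(nents, sizeof(*sg), GFP_KERNEL);
	if (!sg)
		return -ENOMEM;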
@@ -315,7 +315,7 @@ int bch2_data_update_init(struct bch_fs *c, struct data_update *m,
 	bch2_write_op_init(&m->op, c, io_opts);
 	m->op.pos = bkey_start_pos(k.k);
 	m->op.version = k.k->version;
-	m->op.target = data_opts.target,
+	m->op.target = data_opts.target;
 	m->op.write_point = wp;
 	m->op.flags |= BCH_WRITE_PAGES_STABLE|
 		BCH_WRITE_PAGES_OWNED|
@@ -476,7 +476,7 @@ static ssize_t bch2_cached_btree_nodes_read(struct file *file, char __user *buf,
 	if (i->iter < tbl->size) {
 		rht_for_each_entry_rcu(b, pos, tbl, i->iter, hash)
 			bch2_cached_btree_node_to_text(&i->buf, c, b);
-		i->iter++;;
+		i->iter++;
 	} else {
 		done = true;
 	}
@@ -103,7 +103,7 @@ int bch2_dirent_invalid(const struct bch_fs *c, struct bkey_s_c k,

 	if (bkey_val_u64s(k.k) > dirent_val_u64s(len)) {
 		prt_printf(err, "value too big (%zu > %u)",
-			   bkey_val_u64s(k.k),dirent_val_u64s(len));
+			   bkey_val_u64s(k.k), dirent_val_u64s(len));
 		return -EINVAL;
 	}

@@ -291,7 +291,7 @@ bool bch2_extent_merge(struct bch_fs *c, struct bkey_s l, struct bkey_s_c r)
 	if (lp.crc.offset + lp.crc.live_size + rp.crc.live_size <=
 	    lp.crc.uncompressed_size) {
 		/* can use left extent's crc entry */
-	} else if (lp.crc.live_size <= rp.crc.offset ) {
+	} else if (lp.crc.live_size <= rp.crc.offset) {
 		/* can use right extent's crc entry */
 	} else {
 		/* check if checksums can be merged: */
@@ -350,7 +350,7 @@ bool bch2_extent_merge(struct bch_fs *c, struct bkey_s l, struct bkey_s_c r)
 	if (crc_l.offset + crc_l.live_size + crc_r.live_size <=
 	    crc_l.uncompressed_size) {
 		/* can use left extent's crc entry */
-	} else if (crc_l.live_size <= crc_r.offset ) {
+	} else if (crc_l.live_size <= crc_r.offset) {
 		/* can use right extent's crc entry */
 		crc_r.offset -= crc_l.live_size;
 		bch2_extent_crc_pack(entry_to_crc(en_l), crc_r,
@@ -487,11 +487,11 @@ int bch2_rename_trans(struct btree_trans *trans,
 	ret = bch2_inode_write(trans, &src_dir_iter, src_dir_u) ?:
 		(src_dir.inum != dst_dir.inum
 		 ? bch2_inode_write(trans, &dst_dir_iter, dst_dir_u)
-		 : 0 ) ?:
+		 : 0) ?:
 		bch2_inode_write(trans, &src_inode_iter, src_inode_u) ?:
 		(dst_inum.inum
 		 ? bch2_inode_write(trans, &dst_inode_iter, dst_inode_u)
-		 : 0 );
+		 : 0);
err:
 	bch2_trans_iter_exit(trans, &dst_inode_iter);
 	bch2_trans_iter_exit(trans, &src_inode_iter);
@@ -2724,7 +2724,7 @@ static long bchfs_fpunch(struct bch_inode_info *inode, loff_t offset, loff_t len

 	truncate_pagecache_range(&inode->v, offset, end - 1);

-	if (block_start < block_end ) {
+	if (block_start < block_end) {
 		s64 i_sectors_delta = 0;

 		ret = bch2_fpunch(c, inode_inum(inode),
@@ -528,7 +528,7 @@ static int bch2_symlink(struct mnt_idmap *idmap,

 	inode = __bch2_create(idmap, dir, dentry, S_IFLNK|S_IRWXUGO, 0,
 			      (subvol_inum) { 0 }, BCH_CREATE_TMPFILE);
-	if (unlikely(IS_ERR(inode)))
+	if (IS_ERR(inode))
 		return bch2_err_class(PTR_ERR(inode));

 	inode_lock(&inode->v);
@@ -1847,7 +1847,7 @@ got_sb:
 	sb->s_time_min = div_s64(S64_MIN, c->sb.time_units_per_sec) + 1;
 	sb->s_time_max = div_s64(S64_MAX, c->sb.time_units_per_sec);
 	c->vfs_sb = sb;
-	strlcpy(sb->s_id, c->name, sizeof(sb->s_id));
+	strscpy(sb->s_id, c->name, sizeof(sb->s_id));

 	ret = super_setup_bdi(sb);
 	if (ret)
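On strlcpy() -> strscpy() (also in bch2_fs_alloc() below): strlcpy() has to walk the entire source string to compute its return value, even past the destination size, while strscpy() stops at the buffer bound, always NUL-terminates, and returns the bytes copied or -E2BIG on truncation. A usage sketch with hypothetical buffers:

	char dst[16];
	ssize_t n = strscpy(dst, src, sizeof(dst));

	if (n == -E2BIG) {
		/* src didn't fit; dst still holds a truncated, NUL-terminated copy */
	}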
@@ -1918,8 +1918,7 @@ MODULE_ALIAS_FS("bcachefs");
 void bch2_vfs_exit(void)
 {
 	unregister_filesystem(&bcache_fs_type);
-	if (bch2_inode_cache)
-		kmem_cache_destroy(bch2_inode_cache);
+	kmem_cache_destroy(bch2_inode_cache);
 }

 int __init bch2_vfs_init(void)
@@ -2044,7 +2044,8 @@ static int add_nlink(struct bch_fs *c, struct nlink_table *t,
 {
 	if (t->nr == t->size) {
 		size_t new_size = max_t(size_t, 128UL, t->size * 2);
-		void *d = kvmalloc(new_size * sizeof(t->d[0]), GFP_KERNEL);
+		void *d = kvmalloc_array(new_size, sizeof(t->d[0]), GFP_KERNEL);

 		if (!d) {
-			bch_err(c, "fsck: error allocating memory for nlink_table, size %zu", new_size);
+			bch_err(c, "fsck: error allocating memory for nlink_table, size %zu",
+				new_size);
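Same overflow-hardening idea as the kmalloc_array() change earlier, at the kvmalloc level: kvmalloc_array() (also used for the journal key arrays below) checks the count * size multiplication before handing off to kvmalloc(), which tries kmalloc() first and falls back to vmalloc() for large requests. A sketch under assumed names (`nr` and struct nlink_entry are hypothetical):

	struct nlink_entry *d = kvmalloc_array(nr, sizeof(*d), GFP_KERNEL);

	if (!d)
		return -ENOMEM;

	/* ... use d ... */
	kvfree(d);	/* kvmalloc'd memory pairs with kvfree(), not kfree() */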
@@ -314,7 +314,7 @@ static int __bch2_inode_invalid(struct bkey_s_c k, struct printbuf *err)
 		return -EINVAL;
 	}

-	if (bch2_inode_unpack(k, &unpacked)){
+	if (bch2_inode_unpack(k, &unpacked)) {
 		prt_printf(err, "invalid variable length fields");
 		return -EINVAL;
 	}
@@ -738,7 +738,7 @@ int bch2_journal_log_msg(struct journal *j, const char *fmt, ...)
 		return ret;

 	entry = container_of(journal_res_entry(j, &res),
-			     struct jset_entry_log, entry);;
+			     struct jset_entry_log, entry);
 	memset(entry, 0, u64s * sizeof(u64));
 	entry->entry.type = BCH_JSET_ENTRY_log;
 	entry->entry.u64s = u64s - 1;
|
||||
bch2_journal_block(&c->journal);
|
||||
}
|
||||
|
||||
bu = kzalloc(nr_want * sizeof(*bu), GFP_KERNEL);
|
||||
ob = kzalloc(nr_want * sizeof(*ob), GFP_KERNEL);
|
||||
new_buckets = kzalloc(nr * sizeof(u64), GFP_KERNEL);
|
||||
new_bucket_seq = kzalloc(nr * sizeof(u64), GFP_KERNEL);
|
||||
bu = kcalloc(nr_want, sizeof(*bu), GFP_KERNEL);
|
||||
ob = kcalloc(nr_want, sizeof(*ob), GFP_KERNEL);
|
||||
new_buckets = kcalloc(nr, sizeof(u64), GFP_KERNEL);
|
||||
new_bucket_seq = kcalloc(nr, sizeof(u64), GFP_KERNEL);
|
||||
if (!bu || !ob || !new_buckets || !new_bucket_seq) {
|
||||
ret = -ENOMEM;
|
||||
goto err_unblock;
|
||||
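kcalloc(n, size, flags) is the idiomatic spelling for a zeroed array allocation: it behaves like kzalloc(n * size, flags) plus an overflow check on the multiplication, so the four conversions above lose nothing. Roughly:

	/* identical zeroed buffers when nr * sizeof(u64) cannot overflow: */
	u64 *a = kzalloc(nr * sizeof(u64), GFP_KERNEL);	/* old: unchecked multiply */
	u64 *b = kcalloc(nr, sizeof(u64), GFP_KERNEL);	/* new: returns NULL on overflow */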
@@ -1264,7 +1264,7 @@ void __bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
 	rcu_read_lock();
 	s = READ_ONCE(j->reservations);

-	prt_printf(out, "dirty journal entries:\t%llu/%llu\n",fifo_used(&j->pin), j->pin.size);
+	prt_printf(out, "dirty journal entries:\t%llu/%llu\n", fifo_used(&j->pin), j->pin.size);
 	prt_printf(out, "seq:\t\t\t%llu\n", journal_cur_seq(j));
 	prt_printf(out, "seq_ondisk:\t\t%llu\n", j->seq_ondisk);
 	prt_printf(out, "last_seq:\t\t%llu\n", journal_last_seq(j));
@@ -232,7 +232,7 @@ void bch2_journal_space_available(struct journal *j)
 	if ((j->space[journal_space_clean_ondisk].next_entry <
 	     j->space[journal_space_clean_ondisk].total) &&
 	    (clean - clean_ondisk <= total / 8) &&
-	    (clean_ondisk * 2 > clean ))
+	    (clean_ondisk * 2 > clean))
 		set_bit(JOURNAL_MAY_SKIP_FLUSH, &j->flags);
 	else
 		clear_bit(JOURNAL_MAY_SKIP_FLUSH, &j->flags);
@@ -31,7 +31,7 @@ static int bch2_sb_journal_validate(struct bch_sb *sb,
 	if (!nr)
 		return 0;

-	b = kmalloc_array(sizeof(u64), nr, GFP_KERNEL);
+	b = kmalloc_array(nr, sizeof(u64), GFP_KERNEL);
 	if (!b)
 		return -ENOMEM;

@@ -114,7 +114,7 @@ static int bch2_sb_journal_v2_validate(struct bch_sb *sb,
 	if (!nr)
 		return 0;

-	b = kmalloc_array(sizeof(*b), nr, GFP_KERNEL);
+	b = kmalloc_array(nr, sizeof(*b), GFP_KERNEL);
 	if (!b)
 		return -ENOMEM;

@@ -479,7 +479,7 @@ static int __bch2_move_data(struct moving_context *ctxt,
 		/*
 		 * The iterator gets unlocked by __bch2_read_extent - need to
 		 * save a copy of @k elsewhere:
-		*/
+		 */
 		bch2_bkey_buf_reassemble(&sk, c, k);
 		k = bkey_i_to_s_c(sk.k);

@@ -667,7 +667,7 @@ static bool migrate_pred(struct bch_fs *c, void *arg,
 		i++;
 	}

-	return data_opts->rewrite_ptrs != 0;;
+	return data_opts->rewrite_ptrs != 0;
 }

 static bool rereplicate_btree_pred(struct bch_fs *c, void *arg,
@@ -224,7 +224,7 @@ int bch2_journal_key_insert_take(struct bch_fs *c, enum btree_id id,
 			.size = max_t(size_t, keys->size, 8) * 2,
 		};

-		new_keys.d = kvmalloc(sizeof(new_keys.d[0]) * new_keys.size, GFP_KERNEL);
+		new_keys.d = kvmalloc_array(new_keys.size, sizeof(new_keys.d[0]), GFP_KERNEL);
 		if (!new_keys.d) {
 			bch_err(c, "%s: error allocating new key array (size %zu)",
 				__func__, new_keys.size);
|
||||
|
||||
keys->size = roundup_pow_of_two(nr_keys);
|
||||
|
||||
keys->d = kvmalloc(sizeof(keys->d[0]) * keys->size, GFP_KERNEL);
|
||||
keys->d = kvmalloc_array(keys->size, sizeof(keys->d[0]), GFP_KERNEL);
|
||||
if (!keys->d)
|
||||
return -ENOMEM;
|
||||
|
||||
|
@@ -160,7 +160,7 @@ u64 SipHash_End(SIPHASH_CTX *ctx, int rc, int rf)

 	r = (ctx->v[0] ^ ctx->v[1]) ^ (ctx->v[2] ^ ctx->v[3]);
 	memset(ctx, 0, sizeof(*ctx));
-	return (r);
+	return r;
 }

 u64 SipHash(const SIPHASH_KEY *key, int rc, int rf, const void *src, size_t len)
@@ -101,8 +101,7 @@ void bch2_sb_field_delete(struct bch_sb_handle *sb,

 void bch2_free_super(struct bch_sb_handle *sb)
 {
-	if (sb->bio)
-		kfree(sb->bio);
+	kfree(sb->bio);
 	if (!IS_ERR_OR_NULL(sb->bdev))
 		blkdev_put(sb->bdev, sb->holder);
 	kfree(sb->holder);
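The guard removed here (and in bch2_sb_realloc() just below, echoing the kmem_cache_destroy() hunks earlier) was redundant: kfree(NULL) is defined as a no-op, and kmem_cache_destroy() likewise returns early on a NULL cache, so the unguarded calls are equivalent:

	kfree(maybe_null);			/* no-op when maybe_null == NULL */
	kmem_cache_destroy(maybe_null_cache);	/* likewise NULL-safe */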
@@ -151,8 +150,7 @@ int bch2_sb_realloc(struct bch_sb_handle *sb, unsigned u64s)

 		bio_init(bio, NULL, bio->bi_inline_vecs, nr_bvecs, 0);

-		if (sb->bio)
-			kfree(sb->bio);
+		kfree(sb->bio);
 		sb->bio = bio;
 	}

@@ -461,8 +461,8 @@ static void __bch2_fs_free(struct bch_fs *c)
 	kfree(c->unused_inode_hints);
 	free_heap(&c->copygc_heap);

-	if (c->io_complete_wq )
-		destroy_workqueue(c->io_complete_wq );
+	if (c->io_complete_wq)
+		destroy_workqueue(c->io_complete_wq);
 	if (c->copygc_wq)
 		destroy_workqueue(c->copygc_wq);
 	if (c->btree_io_complete_wq)
|
||||
goto err;
|
||||
|
||||
pr_uuid(&name, c->sb.user_uuid.b);
|
||||
strlcpy(c->name, name.buf, sizeof(c->name));
|
||||
strscpy(c->name, name.buf, sizeof(c->name));
|
||||
printbuf_exit(&name);
|
||||
|
||||
ret = name.allocation_failure ? -ENOMEM : 0;
|
||||
@ -1786,9 +1786,8 @@ int bch2_dev_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
|
||||
}
|
||||
|
||||
ret = bch2_trans_mark_dev_sb(c, ca);
|
||||
if (ret) {
|
||||
if (ret)
|
||||
goto err;
|
||||
}
|
||||
|
||||
mutex_lock(&c->sb_lock);
|
||||
mi = &bch2_sb_get_members(c->disk_sb.sb)->members[ca->dev_idx];
|
||||
|
@@ -789,8 +789,6 @@ void memcpy_from_bio(void *dst, struct bio *src, struct bvec_iter src_iter)
 	}
 }

-#include "eytzinger.h"
-
 static int alignment_ok(const void *base, size_t align)
 {
 	return IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ||