btrfs: zoned: properly take lock to read/update block group's zoned variables
__btrfs_add_free_space_zoned() references and modifies the block group's
alloc_offset, ro, and zone_unusable, but without taking the lock. This is
mostly safe because these values monotonically increase (at least for now)
and the function is mostly called from a transaction commit, which is
itself serialized. Still, taking the lock is the safer and correct option,
and I am going to add a change that resets zone_unusable while a block
group is still alive. So, add locking around these operations.
Fixes: 169e0da91a ("btrfs: zoned: track unusable bytes for zones")
CC: stable@vger.kernel.org # 5.15+
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
commit e30729d4bd
parent 008e2512dc
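To make the intent of the change easier to see outside the diff below, here is a
minimal userspace sketch of the access pattern the fix enforces. It is an
illustration only: a simplified block-group struct and pthread mutexes stand in
for the kernel's struct btrfs_block_group and its spinlock, the names bg_sketch
and add_free_space_zoned_sketch are made up for this example, and the accounting
is deliberately reduced to the branches needed to show the locked
read-modify-write.

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins; these are NOT the kernel structures or APIs. */
struct bg_sketch {
	pthread_mutex_t lock;     /* plays the role of block_group->lock */
	uint64_t length;
	uint64_t alloc_offset;
	uint64_t zone_unusable;
	bool ro;
};

/*
 * Shape of the accounting after the fix: every read and update of
 * alloc_offset, ro and zone_unusable happens with the block group's lock
 * held, so a concurrent writer (for example one resetting zone_unusable)
 * cannot interleave with the read-modify-write below.
 */
static void add_free_space_zoned_sketch(struct bg_sketch *bg, uint64_t offset,
					uint64_t size, bool used)
{
	pthread_mutex_lock(&bg->lock);

	/*
	 * The real function distinguishes more cases (initial fill, zone
	 * capacity, the free-space ctl update); this sketch keeps only what
	 * is needed to show the locked section.
	 */
	uint64_t to_free = used ? offset + size - bg->alloc_offset : size;
	uint64_t to_unusable = size - to_free;

	if (!bg->ro)
		bg->zone_unusable += to_unusable; /* unlocked RMW before the fix */

	if (!used)
		bg->alloc_offset -= size; /* roll back space reserved but never written */

	pthread_mutex_unlock(&bg->lock);
}

int main(void)
{
	struct bg_sketch bg = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.length = 256ULL << 20,
		.alloc_offset = 8ULL << 20,
	};

	/* Return 1 MiB that was reserved at offset 7 MiB but never written. */
	add_free_space_zoned_sketch(&bg, 7ULL << 20, 1ULL << 20, false);
	printf("alloc_offset=%llu zone_unusable=%llu\n",
	       (unsigned long long)bg.alloc_offset,
	       (unsigned long long)bg.zone_unusable);
	return 0;
}

Build with e.g. cc -pthread; the only point is that every access to
alloc_offset, ro and zone_unusable now sits between the lock and unlock calls.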
@@ -2697,15 +2697,16 @@ static int __btrfs_add_free_space_zoned(struct btrfs_block_group *block_group,
 	u64 offset = bytenr - block_group->start;
 	u64 to_free, to_unusable;
 	int bg_reclaim_threshold = 0;
-	bool initial = ((size == block_group->length) && (block_group->alloc_offset == 0));
+	bool initial;
 	u64 reclaimable_unusable;
 
-	WARN_ON(!initial && offset + size > block_group->zone_capacity);
+	spin_lock(&block_group->lock);
 
+	initial = ((size == block_group->length) && (block_group->alloc_offset == 0));
+	WARN_ON(!initial && offset + size > block_group->zone_capacity);
 	if (!initial)
 		bg_reclaim_threshold = READ_ONCE(sinfo->bg_reclaim_threshold);
 
-	spin_lock(&ctl->tree_lock);
 	if (!used)
 		to_free = size;
 	else if (initial)
@@ -2718,7 +2719,9 @@ static int __btrfs_add_free_space_zoned(struct btrfs_block_group *block_group,
 		to_free = offset + size - block_group->alloc_offset;
 	to_unusable = size - to_free;
 
+	spin_lock(&ctl->tree_lock);
 	ctl->free_space += to_free;
+	spin_unlock(&ctl->tree_lock);
 	/*
 	 * If the block group is read-only, we should account freed space into
 	 * bytes_readonly.
@@ -2727,11 +2730,8 @@ static int __btrfs_add_free_space_zoned(struct btrfs_block_group *block_group,
 		block_group->zone_unusable += to_unusable;
 		WARN_ON(block_group->zone_unusable > block_group->length);
 	}
-	spin_unlock(&ctl->tree_lock);
 	if (!used) {
-		spin_lock(&block_group->lock);
 		block_group->alloc_offset -= size;
-		spin_unlock(&block_group->lock);
 	}
 
 	reclaimable_unusable = block_group->zone_unusable -
@@ -2745,6 +2745,8 @@ static int __btrfs_add_free_space_zoned(struct btrfs_block_group *block_group,
 		btrfs_mark_bg_to_reclaim(block_group);
 	}
 
+	spin_unlock(&block_group->lock);
+
 	return 0;
 }
 
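A side effect visible in the hunks above: ctl->tree_lock is now taken inside
block_group->lock and only around the free_space update, rather than being
held across the rest of the accounting as before. The following sketch shows
only that nesting order, again with pthread mutexes standing in for the
kernel spinlocks; the standalone form, the globals and the function name are
assumptions for illustration, not kernel code.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the two locks involved; not the kernel objects. */
static pthread_mutex_t bg_lock = PTHREAD_MUTEX_INITIALIZER;   /* block_group->lock */
static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER; /* ctl->tree_lock    */

static uint64_t free_space;     /* guarded by tree_lock */
static uint64_t zone_unusable;  /* guarded by bg_lock   */

/*
 * Locking shape after the patch: the block group lock is held for the whole
 * accounting sequence, and the free-space ctl lock nests briefly inside it,
 * only around the free_space update.
 */
static void account_free_space(uint64_t to_free, uint64_t to_unusable)
{
	pthread_mutex_lock(&bg_lock);          /* outer: zoned block group fields */

	pthread_mutex_lock(&tree_lock);        /* inner: just the free_space counter */
	free_space += to_free;
	pthread_mutex_unlock(&tree_lock);

	zone_unusable += to_unusable;          /* still under bg_lock */

	pthread_mutex_unlock(&bg_lock);
}

int main(void)
{
	account_free_space(1 << 20, 4096);
	printf("free_space=%llu zone_unusable=%llu\n",
	       (unsigned long long)free_space,
	       (unsigned long long)zone_unusable);
	return 0;
}

Keeping the inner lock's scope this narrow limits how long the free-space ctl
lock is held while the block group lock is already taken.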