
dm vdo: remove internal ticket references

Signed-off-by: Susan LeGendre-McGhee <slegendr@redhat.com>
Signed-off-by: Matthew Sakai <msakai@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@kernel.org>
Author: Susan LeGendre-McGhee, 2024-02-15 11:35:15 -05:00 (committed by Mike Snitzer)
parent 6a87a8a258
commit dcd1332bb5
11 changed files with 41 additions and 39 deletions

@@ -542,7 +542,7 @@ static unsigned int distribute_page_over_waitq(struct page_info *info,
 /*
  * Increment the busy count once for each pending completion so that this page does not
- * stop being busy until all completions have been processed (VDO-83).
+ * stop being busy until all completions have been processed.
  */
 info->busy += num_pages;
@@ -1097,9 +1097,9 @@ static void write_pages(struct vdo_completion *flush_completion)
 struct vdo_page_cache *cache = ((struct page_info *) flush_completion->parent)->cache;
 /*
- * We need to cache these two values on the stack since in the error case below, it is
- * possible for the last page info to cause the page cache to get freed. Hence once we
- * launch the last page, it may be unsafe to dereference the cache [VDO-4724].
+ * We need to cache these two values on the stack since it is possible for the last
+ * page info to cause the page cache to get freed. Hence once we launch the last page,
+ * it may be unsafe to dereference the cache.
  */
 bool has_unflushed_pages = (cache->pages_to_flush > 0);
 page_count_t pages_in_flush = cache->pages_in_flush;
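
The idiom in that comment — copy what you need to locals before kicking off the operation that can free the owning object — is worth seeing in isolation. A minimal user-space sketch, not VDO code; struct cache, launch_final_page(), and write_pages_sketch() are hypothetical stand-ins:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for struct vdo_page_cache. */
struct cache {
        unsigned int pages_to_flush;
        unsigned int pages_in_flush;
};

/* Stand-in for launching the last page write, which may free the cache. */
static void launch_final_page(struct cache *cache)
{
        free(cache);
}

static void write_pages_sketch(struct cache *cache)
{
        /* Copy the fields first; after launch_final_page() the cache may be gone. */
        bool has_unflushed_pages = (cache->pages_to_flush > 0);
        unsigned int pages_in_flush = cache->pages_in_flush;

        launch_final_page(cache);

        /* Safe: only the stack copies are used from here on. */
        printf("in flush: %u, more pending: %d\n", pages_in_flush,
               has_unflushed_pages ? 1 : 0);
}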

@@ -453,10 +453,11 @@ static void attempt_logical_block_lock(struct vdo_completion *completion)
 /*
  * If the new request is a pure read request (not read-modify-write) and the lock_holder is
- * writing and has received an allocation (VDO-2683), service the read request immediately
- * by copying data from the lock_holder to avoid having to flush the write out of the
- * packer just to prevent the read from waiting indefinitely. If the lock_holder does not
- * yet have an allocation, prevent it from blocking in the packer and wait on it.
+ * writing and has received an allocation, service the read request immediately by copying
+ * data from the lock_holder to avoid having to flush the write out of the packer just to
+ * prevent the read from waiting indefinitely. If the lock_holder does not yet have an
+ * allocation, prevent it from blocking in the packer and wait on it. This is necessary in
+ * order to prevent returning data that may not have actually been written.
  */
 if (!data_vio->write && READ_ONCE(lock_holder->allocation_succeeded)) {
 copy_to_bio(data_vio->user_bio, lock_holder->vio.data + data_vio->offset);
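
The READ_ONCE() in that condition pairs with a WRITE_ONCE() on the path that records the allocation. A hedged sketch of the pairing; struct holder and both helpers are hypothetical stand-ins for the data_vio fields involved:

#include <linux/compiler.h>
#include <linux/types.h>

/* Hypothetical stand-in for the lock-holding data_vio. */
struct holder {
        bool allocation_succeeded;
};

/* Writer side: publish the flag once an allocation has been received. */
static void mark_allocated(struct holder *h)
{
        WRITE_ONCE(h->allocation_succeeded, true);
}

/*
 * Reader side: READ_ONCE() forces a single, untorn load of a flag another
 * thread may be updating concurrently, so the read path can decide whether
 * to copy data from the holder without taking a lock.
 */
static bool may_copy_from_holder(struct holder *h)
{
        return READ_ONCE(h->allocation_succeeded);
}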

@@ -945,13 +945,11 @@ static void vdo_io_hints(struct dm_target *ti, struct queue_limits *limits)
  * Sets the maximum discard size that will be passed into VDO. This value comes from a
  * table line value passed in during dmsetup create.
  *
  * The value 1024 is the largest usable value on HD systems. A 2048 sector discard on a
  * busy HD system takes 31 seconds. We should use a value no higher than 1024, which takes
- * 15 to 16 seconds on a busy HD system.
- *
- * But using large values results in 120 second blocked task warnings in /var/log/kern.log.
- * In order to avoid these warnings, we choose to use the smallest reasonable value. See
- * VDO-3062 and VDO-3087.
+ * 15 to 16 seconds on a busy HD system. However, using large values results in 120 second
+ * blocked task warnings in kernel logs. In order to avoid these warnings, we choose to
+ * use the smallest reasonable value.
  *
  * The value is displayed in sysfs, and also used by dm-thin to determine whether to pass
  * down discards. The block layer splits large discards on this boundary when this is set.
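
For reference, a device-mapper target advertises this limit from its io_hints hook. A sketch under stated assumptions — the callback name is illustrative and the value is hardcoded here, whereas VDO takes it from the table line:

#include <linux/device-mapper.h>

static void example_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
        /*
         * 1024 sectors (512 KB with 512-byte sectors). The block layer
         * splits larger discards on this boundary, and the limit appears
         * in sysfs as queue/discard_max_bytes.
         */
        limits->max_discard_sectors = 1024;
}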

@@ -235,8 +235,8 @@ int uds_allocate_memory(size_t size, size_t align, const char *what, void *ptr)
 if (p == NULL) {
 /*
  * It is possible for kmalloc to fail to allocate memory because there is
- * no page available (see VDO-3688). A short sleep may allow the page
- * reclaimer to free a page.
+ * no page available. A short sleep may allow the page reclaimer to
+ * free a page.
  */
 fsleep(1000);
 p = kmalloc(size, gfp_flags);
@@ -251,8 +251,8 @@ int uds_allocate_memory(size_t size, size_t align, const char *what, void *ptr)
 UDS_SUCCESS) {
 /*
  * It is possible for __vmalloc to fail to allocate memory because there
- * are no pages available (see VDO-3661). A short sleep may allow the page
- * reclaimer to free enough pages for a small allocation.
+ * are no pages available. A short sleep may allow the page reclaimer
+ * to free enough pages for a small allocation.
  *
  * For larger allocations, the page_alloc code is racing against the page
  * reclaimer. If the page reclaimer can stay ahead of page_alloc, the
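
Both comments describe the same sleep-and-retry shape, which is compact enough to show whole. A minimal sketch assuming GFP flags that allow sleeping; retry_kmalloc() is a hypothetical helper, not a UDS function:

#include <linux/delay.h>
#include <linux/slab.h>

static void *retry_kmalloc(size_t size, gfp_t gfp_flags)
{
        void *p = kmalloc(size, gfp_flags);

        if (p == NULL) {
                /* Give the page reclaimer a moment to free a page. */
                fsleep(1000);
                p = kmalloc(size, gfp_flags);
        }

        return p;
}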

@@ -595,15 +595,13 @@ void vdo_attempt_packing(struct data_vio *data_vio)
 }
 /*
- * The check of may_vio_block_in_packer() here will set the data_vio's compression state to
- * VIO_PACKING if the data_vio is allowed to be compressed (if it has already been
- * canceled, we'll fall out here). Once the data_vio is in the VIO_PACKING state, it must
- * be guaranteed to be put in a bin before any more requests can be processed by the packer
- * thread. Otherwise, a canceling data_vio could attempt to remove the canceled data_vio
- * from the packer and fail to rendezvous with it (VDO-2809). We must also make sure that
- * we will actually bin the data_vio and not give up on it as being larger than the space
- * used in the fullest bin. Hence we must call select_bin() before calling
- * may_vio_block_in_packer() (VDO-2826).
+ * The advance_data_vio_compression_stage() check here verifies that the data_vio is
+ * allowed to be compressed (if it has already been canceled, we'll fall out here). Once
+ * the data_vio is in the DATA_VIO_PACKING state, it must be guaranteed to be put in a bin
+ * before any more requests can be processed by the packer thread. Otherwise, a canceling
+ * data_vio could attempt to remove the canceled data_vio from the packer and fail to
+ * rendezvous with it. Thus, we must call select_bin() first to ensure that we will
+ * actually add the data_vio to a bin before advancing to the DATA_VIO_PACKING stage.
  */
 bin = select_bin(packer, data_vio);
 if ((bin == NULL) ||

@@ -58,7 +58,7 @@ struct compressed_block {
  *
  * There is one special bin which is used to hold data_vios which have been canceled and removed
  * from their bin by the packer. These data_vios need to wait for the canceller to rendezvous with
- * them (VDO-2809) and so they sit in this special bin.
+ * them and so they sit in this special bin.
  */
 struct packer_bin {
 /* List links for packer.packer_bins */

@@ -1504,8 +1504,8 @@ static int extract_new_mappings(struct repair_completion *repair)
 static noinline int compute_usages(struct repair_completion *repair)
 {
 /*
- * VDO-5182: function is declared noinline to avoid what is likely a spurious valgrind
- * error about this structure being uninitialized.
+ * This function is declared noinline to avoid a spurious valgrind error regarding the
+ * following structure being uninitialized.
  */
 struct recovery_point recovery_point = {
 .sequence_number = repair->tail,

@@ -334,7 +334,11 @@ static void launch_write(struct slab_summary_block *block)
 /*
  * Flush before writing to ensure that the slab journal tail blocks and reference updates
- * covered by this summary update are stable (VDO-2332).
+ * covered by this summary update are stable. Otherwise, a subsequent recovery could
+ * encounter a slab summary update that refers to a slab journal tail block that has not
+ * actually been written. In such cases, the slab journal referenced will be treated as
+ * empty, causing any data within the slab which predates the existing recovery journal
+ * entries to be lost.
  */
 pbn = (depot->summary_origin +
 (VDO_SLAB_SUMMARY_BLOCKS_PER_ZONE * allocator->zone_number) +
@@ -499,7 +503,7 @@ static void reap_slab_journal(struct slab_journal *journal)
  * journal block writes can be issued while previous slab summary updates have not yet been
  * made. Even though those slab journal block writes will be ignored if the slab summary
  * update is not persisted, they may still overwrite the to-be-reaped slab journal block
- * resulting in a loss of reference count updates (VDO-2912).
+ * resulting in a loss of reference count updates.
  */
 journal->flush_waiter.callback = flush_for_reaping;
 acquire_vio_from_pool(journal->slab->allocator->vio_pool,
@@ -770,7 +774,8 @@ static void write_slab_journal_block(struct vdo_waiter *waiter, void *context)
 /*
  * This block won't be read in recovery until the slab summary is updated to refer to it.
- * The slab summary update does a flush which is sufficient to protect us from VDO-2331.
+ * The slab summary update does a flush which is sufficient to protect us from corruption
+ * due to out of order slab journal, reference block, or block map writes.
  */
 vdo_submit_metadata_vio(uds_forget(vio), block_number, write_slab_journal_endio,
 complete_write, REQ_OP_WRITE);
@@ -1201,7 +1206,8 @@ static void write_reference_block(struct vdo_waiter *waiter, void *context)
 /*
  * Flush before writing to ensure that the recovery journal and slab journal entries which
- * cover this reference update are stable (VDO-2331).
+ * cover this reference update are stable. This prevents data corruption that can be caused
+ * by out of order writes.
  */
 WRITE_ONCE(block->slab->allocator->ref_counts_statistics.blocks_written,
 block->slab->allocator->ref_counts_statistics.blocks_written + 1);
@@ -1775,7 +1781,7 @@ static void add_entries(struct slab_journal *journal)
 (journal->slab->status == VDO_SLAB_REBUILDING)) {
 /*
  * Don't add entries while rebuilding or while a partial write is
- * outstanding (VDO-2399).
+ * outstanding, as it could result in reference count corruption.
  */
 break;
 }
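
Several of these comments rest on the same mechanism: ordering a metadata write behind a device cache flush. At the bio layer that is requested with REQ_PREFLUSH; a minimal sketch — the function name is illustrative, and VDO actually routes such writes through its own vio machinery:

#include <linux/bio.h>
#include <linux/blk_types.h>

static void submit_flushed_write(struct bio *bio, sector_t sector)
{
        bio->bi_iter.bi_sector = sector;
        /*
         * REQ_PREFLUSH has the device flush its volatile write cache before
         * this bio is written, so blocks submitted earlier (such as slab
         * journal tail blocks) are durable before the update that refers
         * to them.
         */
        bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
        submit_bio(bio);
}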

@@ -191,7 +191,7 @@ static inline void __down(struct semaphore *semaphore)
  * happens, sleep briefly to avoid keeping the CPU locked up in
  * this loop. We could just call cond_resched, but then we'd
  * still keep consuming CPU time slices and swamp other threads
- * trying to do computational work. [VDO-4980]
+ * trying to do computational work.
  */
 fsleep(1000);
 }

@@ -544,7 +544,7 @@ int vdo_make(unsigned int instance, struct device_config *config, char **reason,
 int result;
 struct vdo *vdo;
- /* VDO-3769 - Set a generic reason so we don't ever return garbage. */
+ /* Initialize with a generic failure reason to prevent returning garbage. */
 *reason = "Unspecified error";
 result = uds_allocate(1, struct vdo, __func__, &vdo);

@@ -123,7 +123,6 @@ int create_multi_block_metadata_vio(struct vdo *vdo, enum vio_type vio_type,
 struct vio *vio;
 int result;
- /* If struct vio grows past 256 bytes, we'll lose benefits of VDOSTORY-176. */
 BUILD_BUG_ON(sizeof(struct vio) > 256);
 /*