
vfs-6.9-rc1.fixes

-----BEGIN PGP SIGNATURE-----
 
 iHUEABYKAB0WIQRAhzRXHqcMeLMyaSiRxhvAZXjcogUCZfglxgAKCRCRxhvAZXjc
 ovK9APsF7/TMFhNbtW+JsghSyrEk0cOVPizi8JkRDDWNW3qY+wEAxtydhbmWpbKq
 MpIjMHqwjPx3zXBL8Ec/b4vAoJqpJwQ=
 =NgvO
 -----END PGP SIGNATURE-----

Merge tag 'vfs-6.9-rc1.fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/vfs/vfs

Pull vfs fixes from Christian Brauner:
 "This contains a few small fixes for this merge window:

   - Undo the hiding of silly-rename files in afs. If they're hidden,
     they can no longer be deleted manually with rm, which caused
     regressions

   - Don't cache the preferred address for an afs server, so that an
     explicitly specified preferred server address isn't accidentally
     overridden

   - Fix bad stat() and rmdir() interaction in afs

   - Take a passive reference on the superblock when opening a block
     device so the holder is available to concurrent callers from the
     block layer

   - Clear private data pointer in fscache_begin_operation() to avoid it
     being falsely treated as valid"

* tag 'vfs-6.9-rc1.fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/vfs/vfs:
  fscache: Fix error handling in fscache_begin_operation()
  fs,block: get holder during claim
  afs: Fix occasional rmdir-then-VNOVNODE with generic/011
  afs: Don't cache preferred address
  afs: Revert "afs: Hide silly-rename files from userspace"
commit 0a7b0acece
Author: Linus Torvalds
Date:   2024-03-18 09:15:50 -07:00

7 changed files with 51 additions and 35 deletions

@@ -583,6 +583,9 @@ static void bd_finish_claiming(struct block_device *bdev, void *holder,
 	mutex_unlock(&bdev->bd_holder_lock);
 	bd_clear_claiming(whole, holder);
 	mutex_unlock(&bdev_lock);
+
+	if (hops && hops->get_holder)
+		hops->get_holder(holder);
 }
 
 /**
@@ -605,6 +608,7 @@ EXPORT_SYMBOL(bd_abort_claiming);
 static void bd_end_claim(struct block_device *bdev, void *holder)
 {
 	struct block_device *whole = bdev_whole(bdev);
+	const struct blk_holder_ops *hops = bdev->bd_holder_ops;
 	bool unblock = false;
 
 	/*
@@ -627,6 +631,9 @@ static void bd_end_claim(struct block_device *bdev, void *holder)
 		whole->bd_holder = NULL;
 	mutex_unlock(&bdev_lock);
 
+	if (hops && hops->put_holder)
+		hops->put_holder(holder);
+
 	/*
 	 * If this was the last claim, remove holder link and unblock evpoll if
 	 * it was a write holder.
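
For context, get_holder()/put_holder() form a simple acquire/release pair around bd_finish_claiming() and bd_end_claim(). Below is a minimal, illustrative-only sketch of a holder implementing that contract; the my_holder type and functions are invented for this example, and only the hook signatures come from the blk_holder_ops additions in this series:

/* Hypothetical holder that pins itself with a refcount while claimed. */
#include <linux/blkdev.h>
#include <linux/refcount.h>
#include <linux/slab.h>

struct my_holder {
	refcount_t ref;
	/* ... holder state the block layer may call back into ... */
};

static void my_get_holder(void *holder)
{
	struct my_holder *h = holder;

	/* Called from bd_finish_claiming() once the claim succeeds. */
	refcount_inc(&h->ref);
}

static void my_put_holder(void *holder)
{
	struct my_holder *h = holder;

	/* Called from bd_end_claim() when the claim is dropped. */
	if (refcount_dec_and_test(&h->ref))
		kfree(h);
}

static const struct blk_holder_ops my_holder_ops = {
	.get_holder = my_get_holder,
	.put_holder = my_put_holder,
};

The fs_holder_ops hunk further down wires the same hooks up for superblocks, taking a passive reference (sb->s_count) in get_holder() and dropping it via put_super() in put_holder().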


@@ -474,16 +474,6 @@ static int afs_dir_iterate_block(struct afs_vnode *dvnode,
 			continue;
 		}
 
-		/* Don't expose silly rename entries to userspace. */
-		if (nlen > 6 &&
-		    dire->u.name[0] == '.' &&
-		    ctx->actor != afs_lookup_filldir &&
-		    ctx->actor != afs_lookup_one_filldir &&
-		    memcmp(dire->u.name, ".__afs", 6) == 0) {
-			ctx->pos = blkoff + next * sizeof(union afs_xdr_dirent);
-			continue;
-		}
-
 		/* found the next entry */
 		if (!dir_emit(ctx, dire->u.name, nlen,
 			      ntohl(dire->u.vnode),


@@ -602,6 +602,8 @@ iterate_address:
 		goto wait_for_more_probe_results;
 
 	alist = op->estate->addresses;
+	best_prio = -1;
+	addr_index = 0;
 	for (i = 0; i < alist->nr_addrs; i++) {
 		if (alist->addrs[i].prio > best_prio) {
 			addr_index = i;
@@ -609,9 +611,7 @@ iterate_address:
 		}
 	}
-	addr_index = READ_ONCE(alist->preferred);
 	if (!test_bit(addr_index, &set))
 		addr_index = __ffs(set);
-	alist->preferred = addr_index;
 
 	op->addr_index = addr_index;
 	set_bit(addr_index, &op->addr_tried);
@@ -656,12 +656,6 @@ wait_for_more_probe_results:
 next_server:
 	trace_afs_rotate(op, afs_rotate_trace_next_server, 0);
 	_debug("next");
-	ASSERT(op->estate);
-	alist = op->estate->addresses;
-	if (op->call_responded &&
-	    op->addr_index != READ_ONCE(alist->preferred) &&
-	    test_bit(alist->preferred, &op->addr_tried))
-		WRITE_ONCE(alist->preferred, op->addr_index);
 	op->estate = NULL;
 	goto pick_server;
 
@@ -690,14 +684,7 @@ no_more_servers:
 failed:
 	trace_afs_rotate(op, afs_rotate_trace_failed, 0);
 	op->flags |= AFS_OPERATION_STOP;
-	if (op->estate) {
-		alist = op->estate->addresses;
-		if (op->call_responded &&
-		    op->addr_index != READ_ONCE(alist->preferred) &&
-		    test_bit(alist->preferred, &op->addr_tried))
-			WRITE_ONCE(alist->preferred, op->addr_index);
-		op->estate = NULL;
-	}
+	op->estate = NULL;
 	_leave(" = f [failed %d]", afs_op_error(op));
 	return false;
 }
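
As an aside, the address-selection logic that remains in iterate_address boils down to "take the highest-priority address, then fall back to the first usable one", with nothing cached back into the list. Here is a small stand-alone sketch of that selection; it is not AFS code: ffs() stands in for the kernel's __ffs(), and the addr type and pick_addr() helper are invented for the example.

#include <stdio.h>
#include <strings.h>	/* ffs() */

struct addr { int prio; };

/* Pick the index with the highest priority; if that slot is not in the
 * usable set, fall back to the lowest usable index. */
static int pick_addr(const struct addr *addrs, int nr, unsigned long set)
{
	int best_prio = -1, addr_index = 0;

	for (int i = 0; i < nr; i++) {
		if (addrs[i].prio > best_prio) {
			addr_index = i;
			best_prio = addrs[i].prio;
		}
	}
	if (!(set & (1UL << addr_index)))
		addr_index = ffs(set) - 1;
	return addr_index;
}

int main(void)
{
	struct addr addrs[] = { { .prio = 10 }, { .prio = 30 }, { .prio = 20 } };

	/* Slot 1 has the best priority but is not usable (set = 0b101),
	 * so the fallback picks slot 0. */
	printf("picked %d\n", pick_addr(addrs, 3, 0x5));
	return 0;
}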


@@ -122,6 +122,9 @@ bool afs_check_validity(const struct afs_vnode *vnode)
 	const struct afs_volume *volume = vnode->volume;
 	time64_t deadline = ktime_get_real_seconds() + 10;
 
+	if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
+		return true;
+
 	if (atomic_read(&volume->cb_v_check) != atomic_read(&volume->cb_v_break) ||
 	    atomic64_read(&vnode->cb_expires_at) <= deadline ||
 	    volume->cb_expires_at <= deadline ||
@@ -389,12 +392,17 @@ int afs_validate(struct afs_vnode *vnode, struct key *key)
 	       key_serial(key));
 
 	if (afs_check_validity(vnode))
-		return 0;
+		return test_bit(AFS_VNODE_DELETED, &vnode->flags) ? -ESTALE : 0;
 
 	ret = down_write_killable(&vnode->validate_lock);
 	if (ret < 0)
 		goto error;
 
+	if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) {
+		ret = -ESTALE;
+		goto error_unlock;
+	}
+
 	/* Validate a volume after the v_break has changed or the volume
 	 * callback expired. We only want to do this once per volume per
 	 * v_break change. The actual work will be done when parsing the
@@ -448,12 +456,6 @@ int afs_validate(struct afs_vnode *vnode, struct key *key)
 	vnode->cb_ro_snapshot = cb_ro_snapshot;
 	vnode->cb_scrub = cb_scrub;
 
-	if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) {
-		_debug("file already deleted");
-		ret = -ESTALE;
-		goto error_unlock;
-	}
-
 	/* if the vnode's data version number changed then its contents are
 	 * different */
 	zap |= test_and_clear_bit(AFS_VNODE_ZAP_DATA, &vnode->flags);


@@ -83,8 +83,10 @@ static int fscache_begin_operation(struct netfs_cache_resources *cres,
 	cres->debug_id = cookie->debug_id;
 	cres->inval_counter = cookie->inval_counter;
 
-	if (!fscache_begin_cookie_access(cookie, why))
+	if (!fscache_begin_cookie_access(cookie, why)) {
+		cres->cache_priv = NULL;
 		return -ENOBUFS;
+	}
 
 again:
 	spin_lock(&cookie->lock);
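
The shape of this bug is easy to reproduce outside the kernel. A minimal stand-alone sketch, not fscache code and with all names invented, of why the early assignment has to be undone on failure so a later "do we have cache resources?" check does not treat the half-initialised state as valid:

#include <stdbool.h>
#include <stdio.h>

struct cache_resources {
	void *cache_priv;		/* non-NULL is taken to mean "usable" */
};

/* Stand-in for the access check; assume it fails here. */
static bool begin_cookie_access(void)
{
	return false;
}

static int begin_operation(struct cache_resources *cres, void *cookie)
{
	cres->cache_priv = cookie;
	if (!begin_cookie_access()) {
		cres->cache_priv = NULL;	/* the fix: undo the early assignment */
		return -1;
	}
	return 0;
}

int main(void)
{
	struct cache_resources cres;
	int cookie;

	if (begin_operation(&cres, &cookie) < 0)
		printf("begin failed, cache_priv is %s\n",
		       cres.cache_priv ? "stale (bug)" : "NULL (ok)");
	return 0;
}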


@@ -1515,11 +1515,29 @@ static int fs_bdev_thaw(struct block_device *bdev)
 	return error;
 }
 
+static void fs_bdev_super_get(void *data)
+{
+	struct super_block *sb = data;
+
+	spin_lock(&sb_lock);
+	sb->s_count++;
+	spin_unlock(&sb_lock);
+}
+
+static void fs_bdev_super_put(void *data)
+{
+	struct super_block *sb = data;
+
+	put_super(sb);
+}
+
 const struct blk_holder_ops fs_holder_ops = {
 	.mark_dead = fs_bdev_mark_dead,
 	.sync = fs_bdev_sync,
 	.freeze = fs_bdev_freeze,
 	.thaw = fs_bdev_thaw,
+	.get_holder = fs_bdev_super_get,
+	.put_holder = fs_bdev_super_put,
 };
 EXPORT_SYMBOL_GPL(fs_holder_ops);
 


@@ -1505,6 +1505,16 @@ struct blk_holder_ops {
 	 * Thaw the file system mounted on the block device.
 	 */
 	int (*thaw)(struct block_device *bdev);
+
+	/*
+	 * If needed, get a reference to the holder.
+	 */
+	void (*get_holder)(void *holder);
+
+	/*
+	 * Release the holder.
+	 */
+	void (*put_holder)(void *holder);
 };
 
 /*