afs: Fix mmap coherency vs 3rd-party changes

Fix the coherency management of mmap'd data such that 3rd-party changes
become visible as soon as possible after the callback notification is
delivered by the fileserver.  This is done by the following means:

 (1) When we break a callback on a vnode specified by the CB.CallBack call
     from the server, we queue a work item (vnode->cb_work) to go and
     clobber all the PTEs mapping to that inode.

     This causes the CPU to trip through the ->map_pages() and
     ->page_mkwrite() handlers if userspace attempts to access the page(s)
     again.

     (Ideally, this would be done in the service handler for CB.CallBack,
     but the server is waiting for our reply before considering the call
     complete, and we have a list of vnodes, all of which need breaking -
     getting the mmap_lock and stripping the PTEs on all CPUs could be
     quite slow.)

 (2) Call afs_validate() from the ->map_pages() handler to check to see if
     the file has changed and to get a new callback promise from the
     server.
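
Summarised, the flow for (1) and (2) is:

	CB.CallBack received -> afs_break_callback()
	  -> queue vnode->cb_work -> unmap_mapping_pages()
	     [PTEs stripped; pagecache left intact]
	userspace touches the page again
	  -> ->map_pages() -> afs_validate() -> filemap_map_pages()
	     [file checked for changes; new callback promise obtained]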

Also handle the fileserver telling us that it's dropping all callbacks
(possibly after it's been restarted) by sending us a CB.InitCallBackState*
call.  This is handled by the following means:

 (3) Maintain a per-cell list of afs files that are currently mmap'd
     (cell->fs_open_mmaps).

 (4) Add a work item to each server that is invoked if there are any open
     mmaps when CB.InitCallBackState happens.  This work item goes through
     the aforementioned list and invokes the vnode->cb_work work item for
     each one that is currently using this server.

     This causes the PTEs to be cleared, causing ->map_pages() or
     ->page_mkwrite() to be called again, thereby calling afs_validate()
     again.
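
Summarised, the flow for (3) and (4) is:

	CB.InitCallBackState received -> afs_init_callback_state()
	  -> queue server->initcb_work
	     -> walk cell->fs_open_mmaps; for each vnode using this server:
	        clear the callback promise and queue vnode->cb_work
	        -> unmap_mapping_pages() -> refault -> afs_validate()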

I've chosen to simply strip the PTEs at the point of notification reception
rather than invalidate all the pages as well because (a) it's faster, (b)
we may get a notification for reasons other than the data being altered (in
which case we don't want to clobber the pagecache) and (c) we need to ask
the server to find out whether the data actually changed - and I don't want
to hold up userspace whilst waiting for the reply.
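
As a condensed sketch of that choice (invalidate_inode_pages2() being the
rejected alternative; it is not called by this patch):

	/* Chosen: strip the PTEs only.  The pagecache survives, and stale
	 * data is only refetched if revalidation on refault says it must be.
	 */
	unmap_mapping_pages(vnode->vfs_inode.i_mapping, 0, 0, false);

	/* Rejected: discarding the pagecache too would throw away pages
	 * that may prove to still be valid once the server has replied.
	 */
	invalidate_inode_pages2(vnode->vfs_inode.i_mapping);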

This was tested using the attached test program:

	#include <stdbool.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <unistd.h>
	#include <fcntl.h>
	#include <sys/mman.h>
	int main(int argc, char *argv[])
	{
		size_t size = getpagesize();
		unsigned char *p;
		bool mod = (argc == 3);
		int fd;
		if (argc != 2 && argc != 3) {
			fprintf(stderr, "Format: %s <file> [mod]\n", argv[0]);
			exit(2);
		}
		fd = open(argv[1], mod ? O_RDWR : O_RDONLY);
		if (fd < 0) {
			perror(argv[1]);
			exit(1);
		}

		p = mmap(NULL, size, mod ? PROT_READ|PROT_WRITE : PROT_READ,
			 MAP_SHARED, fd, 0);
		if (p == MAP_FAILED) {
			perror("mmap");
			exit(1);
		}
		for (;;) {
			if (mod) {
				p[0]++;
				msync(p, size, MS_ASYNC);
				fsync(fd);
			}
			printf("%02x", p[0]);
			fflush(stdout);
			sleep(1);
		}
	}

It runs in two modes: in one mode, it mmaps a file, then sits in a loop
reading the first byte, printing it and sleeping for a second; in the
second mode it mmaps a file, then sits in a loop incrementing the first
byte and flushing, then printing and sleeping.
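
For example (the program name and AFS path are illustrative):

	# On the writing machine:
	./test-mmap /afs/example.com/somefile mod

	# On the reading machine:
	./test-mmap /afs/example.com/somefile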

Two instances of this program can be run on different machines, one doing
the reading and one doing the writing.  The reader should see the changes
made by the writer but, without this patch, it doesn't, because validity
checking is done lazily - only on entry to the filesystem.
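
Illustrative output (assuming the byte starts at zero and the reader is
started alongside the writer):

	writer:             0102030405...
	reader (patched):   0102030405...
	reader (unpatched): 0101010101...   <- stuck at the first value seen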

Testing the InitCallBackState change is more complicated.  The server has
to be taken offline, the saved callback state file removed and then the
server restarted whilst the reading-mode program continues to run.  The
client machine then has to poke the server to trigger the InitCallBackState
call.

Signed-off-by: David Howells <dhowells@redhat.com>
Tested-by: Markus Suvanto <markus.suvanto@gmail.com>
cc: linux-afs@lists.infradead.org
Link: https://lore.kernel.org/r/163111668833.283156.382633263709075739.stgit@warthog.procyon.org.uk/
commit 6e0e99d58a (parent 63d49d843e)
Author: David Howells <dhowells@redhat.com>
Date:   2021-09-02 16:43:10 +01:00

7 changed files with 120 additions and 3 deletions

fs/afs/callback.c

@@ -20,6 +20,37 @@
 #include <linux/sched.h>
 #include "internal.h"
 
+/*
+ * Handle invalidation of an mmap'd file.  We invalidate all the PTEs referring
+ * to the pages in this file's pagecache, forcing the kernel to go through
+ * ->fault() or ->page_mkwrite() - at which point we can handle invalidation
+ * more fully.
+ */
+void afs_invalidate_mmap_work(struct work_struct *work)
+{
+	struct afs_vnode *vnode = container_of(work, struct afs_vnode, cb_work);
+
+	unmap_mapping_pages(vnode->vfs_inode.i_mapping, 0, 0, false);
+}
+
+void afs_server_init_callback_work(struct work_struct *work)
+{
+	struct afs_server *server = container_of(work, struct afs_server, initcb_work);
+	struct afs_vnode *vnode;
+	struct afs_cell *cell = server->cell;
+
+	down_read(&cell->fs_open_mmaps_lock);
+
+	list_for_each_entry(vnode, &cell->fs_open_mmaps, cb_mmap_link) {
+		if (vnode->cb_server == server) {
+			clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags);
+			queue_work(system_unbound_wq, &vnode->cb_work);
+		}
+	}
+
+	up_read(&cell->fs_open_mmaps_lock);
+}
+
 /*
  * Allow the fileserver to request callback state (re-)initialisation.
  * Unfortunately, UUIDs are not guaranteed unique.
@@ -29,8 +60,10 @@ void afs_init_callback_state(struct afs_server *server)
 {
 	rcu_read_lock();
 	do {
 		server->cb_s_break++;
-		server = rcu_dereference(server->uuid_next);
-	} while (0);
+		if (!list_empty(&server->cell->fs_open_mmaps))
+			queue_work(system_unbound_wq, &server->initcb_work);
+
+	} while ((server = rcu_dereference(server->uuid_next)));
 	rcu_read_unlock();
 }
@@ -49,6 +82,11 @@ void __afs_break_callback(struct afs_vnode *vnode, enum afs_cb_break_reason reas
 		if (vnode->lock_state == AFS_VNODE_LOCK_WAITING_FOR_CB)
 			afs_lock_may_be_available(vnode);
 
+		if (reason != afs_cb_break_for_deleted &&
+		    vnode->status.type == AFS_FTYPE_FILE &&
+		    atomic_read(&vnode->cb_nr_mmap))
+			queue_work(system_unbound_wq, &vnode->cb_work);
+
 		trace_afs_cb_break(&vnode->fid, vnode->cb_break, reason, true);
 	} else {
 		trace_afs_cb_break(&vnode->fid, vnode->cb_break, reason, false);

fs/afs/cell.c

@@ -166,6 +166,8 @@ static struct afs_cell *afs_alloc_cell(struct afs_net *net,
 	seqlock_init(&cell->volume_lock);
 	cell->fs_servers = RB_ROOT;
 	seqlock_init(&cell->fs_lock);
+	INIT_LIST_HEAD(&cell->fs_open_mmaps);
+	init_rwsem(&cell->fs_open_mmaps_lock);
 	rwlock_init(&cell->vl_servers_lock);
 	cell->flags = (1 << AFS_CELL_FL_CHECK_ALIAS);

fs/afs/file.c

@@ -25,6 +25,9 @@ static int afs_releasepage(struct page *page, gfp_t gfp_flags);
 static void afs_readahead(struct readahead_control *ractl);
 static ssize_t afs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter);
+static void afs_vm_open(struct vm_area_struct *area);
+static void afs_vm_close(struct vm_area_struct *area);
+static vm_fault_t afs_vm_map_pages(struct vm_fault *vmf, pgoff_t start_pgoff, pgoff_t end_pgoff);
 
 const struct file_operations afs_file_operations = {
 	.open		= afs_open,
@@ -60,8 +63,10 @@ const struct address_space_operations afs_fs_aops = {
 };
 
 static const struct vm_operations_struct afs_vm_ops = {
+	.open		= afs_vm_open,
+	.close		= afs_vm_close,
 	.fault		= filemap_fault,
-	.map_pages	= filemap_map_pages,
+	.map_pages	= afs_vm_map_pages,
 	.page_mkwrite	= afs_page_mkwrite,
 };
@@ -492,19 +497,79 @@ static int afs_releasepage(struct page *page, gfp_t gfp_flags)
 	return 1;
 }
 
+static void afs_add_open_mmap(struct afs_vnode *vnode)
+{
+	if (atomic_inc_return(&vnode->cb_nr_mmap) == 1) {
+		down_write(&vnode->volume->cell->fs_open_mmaps_lock);
+
+		list_add_tail(&vnode->cb_mmap_link,
+			      &vnode->volume->cell->fs_open_mmaps);
+
+		up_write(&vnode->volume->cell->fs_open_mmaps_lock);
+	}
+}
+
+static void afs_drop_open_mmap(struct afs_vnode *vnode)
+{
+	if (!atomic_dec_and_test(&vnode->cb_nr_mmap))
+		return;
+
+	down_write(&vnode->volume->cell->fs_open_mmaps_lock);
+
+	if (atomic_read(&vnode->cb_nr_mmap) == 0)
+		list_del_init(&vnode->cb_mmap_link);
+
+	up_write(&vnode->volume->cell->fs_open_mmaps_lock);
+	flush_work(&vnode->cb_work);
+}
+
 /*
  * Handle setting up a memory mapping on an AFS file.
  */
 static int afs_file_mmap(struct file *file, struct vm_area_struct *vma)
 {
+	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
 	int ret;
 
+	afs_add_open_mmap(vnode);
+
 	ret = generic_file_mmap(file, vma);
 	if (ret == 0)
 		vma->vm_ops = &afs_vm_ops;
+	else
+		afs_drop_open_mmap(vnode);
 	return ret;
 }
 
+static void afs_vm_open(struct vm_area_struct *vma)
+{
+	afs_add_open_mmap(AFS_FS_I(file_inode(vma->vm_file)));
+}
+
+static void afs_vm_close(struct vm_area_struct *vma)
+{
+	afs_drop_open_mmap(AFS_FS_I(file_inode(vma->vm_file)));
+}
+
+static vm_fault_t afs_vm_map_pages(struct vm_fault *vmf, pgoff_t start_pgoff, pgoff_t end_pgoff)
+{
+	struct afs_vnode *vnode = AFS_FS_I(file_inode(vmf->vma->vm_file));
+	struct afs_file *af = vmf->vma->vm_file->private_data;
+
+	switch (afs_validate(vnode, af->key)) {
+	case 0:
+		return filemap_map_pages(vmf, start_pgoff, end_pgoff);
+	case -ENOMEM:
+		return VM_FAULT_OOM;
+	case -EINTR:
+	case -ERESTARTSYS:
+		return VM_FAULT_RETRY;
+	case -ESTALE:
+	default:
+		return VM_FAULT_SIGBUS;
+	}
+}
+
 static ssize_t afs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
 {
 	struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp));

fs/afs/internal.h

@@ -390,6 +390,8 @@ struct afs_cell {
 	/* Active fileserver interaction state. */
 	struct rb_root	fs_servers;	/* afs_server (by server UUID) */
 	seqlock_t	fs_lock;	/* For fs_servers */
+	struct rw_semaphore fs_open_mmaps_lock;
+	struct list_head fs_open_mmaps;	/* List of vnodes that are mmapped */
 
 	/* VL server list. */
 	rwlock_t	vl_servers_lock; /* Lock on vl_servers */
@@ -503,6 +505,7 @@ struct afs_server {
 	struct hlist_node addr4_link;	/* Link in net->fs_addresses4 */
 	struct hlist_node addr6_link;	/* Link in net->fs_addresses6 */
 	struct hlist_node proc_link;	/* Link in net->fs_proc */
+	struct work_struct initcb_work;	/* Work for CB.InitCallBackState* */
 	struct afs_server *gc_next;	/* Next server in manager's list */
 	time64_t	unuse_time;	/* Time at which last unused */
 	unsigned long	flags;
@@ -657,7 +660,10 @@ struct afs_vnode {
 	afs_lock_type_t	lock_type : 8;
 
 	/* outstanding callback notification on this file */
+	struct work_struct cb_work;	/* Work for mmap'd files */
+	struct list_head cb_mmap_link;	/* Link in cell->fs_open_mmaps */
 	void		*cb_server;	/* Server with callback/filelock */
+	atomic_t	cb_nr_mmap;	/* Number of mmaps */
 	unsigned int	cb_s_break;	/* Mass break counter on ->server */
 	unsigned int	cb_v_break;	/* Mass break counter on ->volume */
 	unsigned int	cb_break;	/* Break counter on vnode */
@@ -965,6 +971,8 @@ extern struct fscache_cookie_def afs_vnode_cache_index_def;
 
 /*
  * callback.c
  */
+extern void afs_invalidate_mmap_work(struct work_struct *);
+extern void afs_server_init_callback_work(struct work_struct *work);
 extern void afs_init_callback_state(struct afs_server *);
 extern void __afs_break_callback(struct afs_vnode *, enum afs_cb_break_reason);
 extern void afs_break_callback(struct afs_vnode *, enum afs_cb_break_reason);

fs/afs/server.c

@@ -235,6 +235,7 @@ static struct afs_server *afs_alloc_server(struct afs_cell *cell,
 	server->addr_version = alist->version;
 	server->uuid = *uuid;
 	rwlock_init(&server->fs_lock);
+	INIT_WORK(&server->initcb_work, afs_server_init_callback_work);
 	init_waitqueue_head(&server->probe_wq);
 	INIT_LIST_HEAD(&server->probe_link);
 	spin_lock_init(&server->probe_lock);
@@ -467,6 +468,7 @@ static void afs_destroy_server(struct afs_net *net, struct afs_server *server)
 	if (test_bit(AFS_SERVER_FL_MAY_HAVE_CB, &server->flags))
 		afs_give_up_callbacks(net, server);
 
+	flush_work(&server->initcb_work);
 	afs_put_server(net, server, afs_server_trace_destroy);
 }

fs/afs/super.c

@@ -698,6 +698,7 @@ static struct inode *afs_alloc_inode(struct super_block *sb)
 	vnode->lock_state	= AFS_VNODE_LOCK_NONE;
 
 	init_rwsem(&vnode->rmdir_lock);
+	INIT_WORK(&vnode->cb_work, afs_invalidate_mmap_work);
 
 	_leave(" = %p", &vnode->vfs_inode);
 	return &vnode->vfs_inode;

mm/memory.c

@@ -3403,6 +3403,7 @@ void unmap_mapping_pages(struct address_space *mapping, pgoff_t start,
 		unmap_mapping_range_tree(&mapping->i_mmap, &details);
 	i_mmap_unlock_write(mapping);
 }
+EXPORT_SYMBOL_GPL(unmap_mapping_pages);
 
 /**
  * unmap_mapping_range - unmap the portion of all mmaps in the specified