// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
 * privcmd.c
 *
 * Interface to privileged domain-0 commands.
 *
 * Copyright (c) 2002-2004, K A Fraser, B Dragovic
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/eventfd.h>
#include <linux/file.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/srcu.h>
#include <linux/string.h>
#include <linux/workqueue.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/seq_file.h>
#include <linux/miscdevice.h>
#include <linux/moduleparam.h>
#include <linux/virtio_mmio.h>

#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/events.h>
#include <xen/privcmd.h>
#include <xen/interface/xen.h>
#include <xen/interface/memory.h>
#include <xen/interface/hvm/dm_op.h>
#include <xen/interface/hvm/ioreq.h>
#include <xen/features.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
#include <xen/balloon.h>
#ifdef CONFIG_XEN_ACPI
#include <xen/acpi.h>
#endif

#include "privcmd.h"

MODULE_DESCRIPTION("Xen hypercall passthrough driver");
MODULE_LICENSE("GPL");

#define PRIV_VMA_LOCKED ((void *)1)

static unsigned int privcmd_dm_op_max_num = 16;
module_param_named(dm_op_max_nr_bufs, privcmd_dm_op_max_num, uint, 0644);
MODULE_PARM_DESC(dm_op_max_nr_bufs,
		 "Maximum number of buffers per dm_op hypercall");

static unsigned int privcmd_dm_op_buf_max_size = 4096;
module_param_named(dm_op_buf_max_size, privcmd_dm_op_buf_max_size, uint,
		   0644);
MODULE_PARM_DESC(dm_op_buf_max_size,
		 "Maximum size of a dm_op hypercall buffer");

struct privcmd_data {
	domid_t domid;
};

static int privcmd_vma_range_is_mapped(
	       struct vm_area_struct *vma,
	       unsigned long addr,
	       unsigned long nr_pages);

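/*
 * Forward an arbitrary hypercall from userspace to the hypervisor.
 * Refused outright once the file handle has been restricted to a
 * specific domain (see privcmd_ioctl_restrict()).
 */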
static long privcmd_ioctl_hypercall(struct file *file, void __user *udata)
{
	struct privcmd_data *data = file->private_data;
	struct privcmd_hypercall hypercall;
	long ret;

	/* Disallow arbitrary hypercalls if restricted */
	if (data->domid != DOMID_INVALID)
		return -EPERM;

	if (copy_from_user(&hypercall, udata, sizeof(hypercall)))
		return -EFAULT;

	xen_preemptible_hcall_begin();
	ret = privcmd_call(hypercall.op,
			   hypercall.arg[0], hypercall.arg[1],
			   hypercall.arg[2], hypercall.arg[3],
			   hypercall.arg[4]);
	xen_preemptible_hcall_end();

	return ret;
}

static void free_page_list(struct list_head *pages)
{
	struct page *p, *n;

	list_for_each_entry_safe(p, n, pages, lru)
		__free_page(p);

	INIT_LIST_HEAD(pages);
}

/*
 * Given an array of items in userspace, return a list of pages
 * containing the data.  If copying fails, either because of memory
 * allocation failure or a problem reading user memory, return an
 * error code; it's up to the caller to dispose of any partial list.
 */
static int gather_array(struct list_head *pagelist,
			unsigned nelem, size_t size,
			const void __user *data)
{
	unsigned pageidx;
	void *pagedata;
	int ret;

	if (size > PAGE_SIZE)
		return 0;

	pageidx = PAGE_SIZE;
	pagedata = NULL;	/* quiet, gcc */
	while (nelem--) {
		if (pageidx > PAGE_SIZE-size) {
			struct page *page = alloc_page(GFP_KERNEL);

			ret = -ENOMEM;
			if (page == NULL)
				goto fail;

			pagedata = page_address(page);

			list_add_tail(&page->lru, pagelist);
			pageidx = 0;
		}

		ret = -EFAULT;
		if (copy_from_user(pagedata + pageidx, data, size))
			goto fail;

		data += size;
		pageidx += size;
	}

	ret = 0;

fail:
	return ret;
}

/*
 * Call function "fn" on each element of the array fragmented
 * over a list of pages.
 */
static int traverse_pages(unsigned nelem, size_t size,
			  struct list_head *pos,
			  int (*fn)(void *data, void *state),
			  void *state)
{
	void *pagedata;
	unsigned pageidx;
	int ret = 0;

	BUG_ON(size > PAGE_SIZE);

	pageidx = PAGE_SIZE;
	pagedata = NULL;	/* hush, gcc */

	while (nelem--) {
		if (pageidx > PAGE_SIZE-size) {
			struct page *page;
			pos = pos->next;
			page = list_entry(pos, struct page, lru);
			pagedata = page_address(page);
			pageidx = 0;
		}

		ret = (*fn)(pagedata + pageidx, state);
		if (ret)
			break;
		pageidx += size;
	}

	return ret;
}

/*
 * Similar to traverse_pages, but use each page as a "block" of
 * data to be processed as one unit.
 */
static int traverse_pages_block(unsigned nelem, size_t size,
				struct list_head *pos,
				int (*fn)(void *data, int nr, void *state),
				void *state)
{
	void *pagedata;
	int ret = 0;

	BUG_ON(size > PAGE_SIZE);

	while (nelem) {
		int nr = (PAGE_SIZE/size);
		struct page *page;
		if (nr > nelem)
			nr = nelem;
		pos = pos->next;
		page = list_entry(pos, struct page, lru);
		pagedata = page_address(page);
		ret = (*fn)(pagedata, nr, state);
		if (ret)
			break;
		nelem -= nr;
	}

	return ret;
}

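/*
 * State carried across mmap_gfn_range() calls while privcmd_ioctl_mmap()
 * walks the gathered privcmd_mmap_entry array: the next expected VA, the
 * target VMA and the foreign domain being mapped.
 */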
struct mmap_gfn_state {
	unsigned long va;
	struct vm_area_struct *vma;
	domid_t domain;
};

static int mmap_gfn_range(void *data, void *state)
{
	struct privcmd_mmap_entry *msg = data;
	struct mmap_gfn_state *st = state;
	struct vm_area_struct *vma = st->vma;
	int rc;

	/* Do not allow range to wrap the address space. */
	if ((msg->npages > (LONG_MAX >> PAGE_SHIFT)) ||
	    ((unsigned long)(msg->npages << PAGE_SHIFT) >= -st->va))
		return -EINVAL;

	/* Range chunks must be contiguous in va space. */
	if ((msg->va != st->va) ||
	    ((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end))
		return -EINVAL;

	rc = xen_remap_domain_gfn_range(vma,
					msg->va & PAGE_MASK,
					msg->mfn, msg->npages,
					vma->vm_page_prot,
					st->domain, NULL);
	if (rc < 0)
		return rc;

	st->va += msg->npages << PAGE_SHIFT;

	return 0;
}

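/*
 * Map the foreign frames described by an array of privcmd_mmap_entry
 * records into a single previously created privcmd VMA.  Not supported
 * on auto-translated (PVH/HVM) dom0; those callers use the batch ioctls.
 */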
static long privcmd_ioctl_mmap(struct file *file, void __user *udata)
{
	struct privcmd_data *data = file->private_data;
	struct privcmd_mmap mmapcmd;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int rc;
	LIST_HEAD(pagelist);
	struct mmap_gfn_state state;

	/* We only support privcmd_ioctl_mmap_batch for non-auto-translated. */
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return -ENOSYS;

	if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd)))
		return -EFAULT;

	/* If restriction is in place, check the domid matches */
	if (data->domid != DOMID_INVALID && data->domid != mmapcmd.dom)
		return -EPERM;

	rc = gather_array(&pagelist,
			  mmapcmd.num, sizeof(struct privcmd_mmap_entry),
			  mmapcmd.entry);

	if (rc || list_empty(&pagelist))
		goto out;

	mmap_write_lock(mm);

	{
		struct page *page = list_first_entry(&pagelist,
						     struct page, lru);
		struct privcmd_mmap_entry *msg = page_address(page);

		vma = vma_lookup(mm, msg->va);
		rc = -EINVAL;

		if (!vma || (msg->va != vma->vm_start) || vma->vm_private_data)
			goto out_up;
		vma->vm_private_data = PRIV_VMA_LOCKED;
	}

	state.va = vma->vm_start;
	state.vma = vma;
	state.domain = mmapcmd.dom;

	rc = traverse_pages(mmapcmd.num, sizeof(struct privcmd_mmap_entry),
			    &pagelist,
			    mmap_gfn_range, &state);

out_up:
	mmap_write_unlock(mm);

out:
	free_page_list(&pagelist);

	return rc;
}

struct mmap_batch_state {
	domid_t domain;
	unsigned long va;
	struct vm_area_struct *vma;
	int index;
	/* A tristate:
	 *      0 for no errors
	 *      1 if at least one error has happened (and no
	 *          -ENOENT errors have happened)
	 *      -ENOENT if at least 1 -ENOENT has happened.
	 */
	int global_error;
	int version;

	/* User-space gfn array to store errors in the second pass for V1. */
	xen_pfn_t __user *user_gfn;
	/* User-space int array to store errors in the second pass for V2. */
	int __user *user_err;
};

/* auto translated dom0 note: if domU being created is PV, then gfn is
 * mfn(addr on bus). If it's auto xlated, then gfn is pfn (input to HAP).
 */
static int mmap_batch_fn(void *data, int nr, void *state)
{
	xen_pfn_t *gfnp = data;
	struct mmap_batch_state *st = state;
	struct vm_area_struct *vma = st->vma;
	struct page **pages = vma->vm_private_data;
	struct page **cur_pages = NULL;
	int ret;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		cur_pages = &pages[st->index];

	BUG_ON(nr < 0);
	ret = xen_remap_domain_gfn_array(st->vma, st->va & PAGE_MASK, gfnp, nr,
					 (int *)gfnp, st->vma->vm_page_prot,
					 st->domain, cur_pages);

	/* Adjust the global_error? */
	if (ret != nr) {
		if (ret == -ENOENT)
			st->global_error = -ENOENT;
		else {
			/* Record that at least one error has happened. */
			if (st->global_error == 0)
				st->global_error = 1;
		}
	}
	st->va += XEN_PAGE_SIZE * nr;
	st->index += nr / XEN_PFN_PER_PAGE;

	return 0;
}

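/*
 * Report a single per-frame error back to userspace, using either the V1
 * encoding (error bits folded into the gfn itself) or the V2 error array.
 */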
static int mmap_return_error(int err, struct mmap_batch_state *st)
{
	int ret;

	if (st->version == 1) {
		if (err) {
			xen_pfn_t gfn;

			ret = get_user(gfn, st->user_gfn);
			if (ret < 0)
				return ret;
			/*
			 * V1 encodes the error codes in the 32bit top
			 * nibble of the gfn (with its known
			 * limitations vis-a-vis 64 bit callers).
			 */
			gfn |= (err == -ENOENT) ?
				PRIVCMD_MMAPBATCH_PAGED_ERROR :
				PRIVCMD_MMAPBATCH_MFN_ERROR;
			return __put_user(gfn, st->user_gfn++);
		} else
			st->user_gfn++;
	} else { /* st->version == 2 */
		if (err)
			return __put_user(err, st->user_err++);
		else
			st->user_err++;
	}

	return 0;
}

static int mmap_return_errors(void *data, int nr, void *state)
{
	struct mmap_batch_state *st = state;
	int *errs = data;
	int i;
	int ret;

	for (i = 0; i < nr; i++) {
		ret = mmap_return_error(errs[i], st);
		if (ret < 0)
			return ret;
	}
	return 0;
}

/* Allocate pfns that are then mapped with gfns from foreign domid. Update
 * the vma with the page info to use later.
 * Returns: 0 if success, otherwise -errno
 */
static int alloc_empty_pages(struct vm_area_struct *vma, int numpgs)
{
	int rc;
	struct page **pages;

	pages = kvcalloc(numpgs, sizeof(pages[0]), GFP_KERNEL);
	if (pages == NULL)
		return -ENOMEM;

	rc = xen_alloc_unpopulated_pages(numpgs, pages);
	if (rc != 0) {
		pr_warn("%s Could not alloc %d pfns rc:%d\n", __func__,
			numpgs, rc);
		kvfree(pages);
		return -ENOMEM;
	}
	BUG_ON(vma->vm_private_data != NULL);
	vma->vm_private_data = pages;

	return 0;
}

static const struct vm_operations_struct privcmd_vm_ops;

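/*
 * Handle the V1 and V2 mmap-batch ioctls: map an array of foreign gfns
 * into the caller's privcmd VMA and report per-frame errors back to
 * userspace in the format the requested version expects.
 */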
static long privcmd_ioctl_mmap_batch(
	struct file *file, void __user *udata, int version)
{
	struct privcmd_data *data = file->private_data;
	int ret;
	struct privcmd_mmapbatch_v2 m;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long nr_pages;
	LIST_HEAD(pagelist);
	struct mmap_batch_state state;

	switch (version) {
	case 1:
		if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch)))
			return -EFAULT;
		/* Returns per-frame error in m.arr. */
		m.err = NULL;
		if (!access_ok(m.arr, m.num * sizeof(*m.arr)))
			return -EFAULT;
		break;
	case 2:
		if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch_v2)))
			return -EFAULT;
		/* Returns per-frame error code in m.err. */
		if (!access_ok(m.err, m.num * (sizeof(*m.err))))
			return -EFAULT;
		break;
	default:
		return -EINVAL;
	}

	/* If restriction is in place, check the domid matches */
	if (data->domid != DOMID_INVALID && data->domid != m.dom)
		return -EPERM;

	nr_pages = DIV_ROUND_UP(m.num, XEN_PFN_PER_PAGE);
	if ((m.num <= 0) || (nr_pages > (LONG_MAX >> PAGE_SHIFT)))
		return -EINVAL;

	ret = gather_array(&pagelist, m.num, sizeof(xen_pfn_t), m.arr);

	if (ret)
		goto out;
	if (list_empty(&pagelist)) {
		ret = -EINVAL;
		goto out;
	}

	if (version == 2) {
		/* Zero error array now to only copy back actual errors. */
		if (clear_user(m.err, sizeof(int) * m.num)) {
			ret = -EFAULT;
			goto out;
		}
	}

	mmap_write_lock(mm);

	vma = find_vma(mm, m.addr);
	if (!vma ||
	    vma->vm_ops != &privcmd_vm_ops) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/*
	 * Caller must either:
	 *
	 * Map the whole VMA range, which will also allocate all the
	 * pages required for the auto_translated_physmap case.
	 *
	 * Or
	 *
	 * Map unmapped holes left from a previous map attempt (e.g.,
	 * because those foreign frames were previously paged out).
	 */
	if (vma->vm_private_data == NULL) {
		if (m.addr != vma->vm_start ||
		    m.addr + (nr_pages << PAGE_SHIFT) != vma->vm_end) {
			ret = -EINVAL;
			goto out_unlock;
		}
		if (xen_feature(XENFEAT_auto_translated_physmap)) {
			ret = alloc_empty_pages(vma, nr_pages);
			if (ret < 0)
				goto out_unlock;
		} else
			vma->vm_private_data = PRIV_VMA_LOCKED;
	} else {
		if (m.addr < vma->vm_start ||
		    m.addr + (nr_pages << PAGE_SHIFT) > vma->vm_end) {
			ret = -EINVAL;
			goto out_unlock;
		}
		if (privcmd_vma_range_is_mapped(vma, m.addr, nr_pages)) {
			ret = -EINVAL;
			goto out_unlock;
		}
	}

	state.domain        = m.dom;
	state.vma           = vma;
	state.va            = m.addr;
	state.index         = 0;
	state.global_error  = 0;
	state.version       = version;

	BUILD_BUG_ON(((PAGE_SIZE / sizeof(xen_pfn_t)) % XEN_PFN_PER_PAGE) != 0);
	/* mmap_batch_fn guarantees ret == 0 */
	BUG_ON(traverse_pages_block(m.num, sizeof(xen_pfn_t),
				    &pagelist, mmap_batch_fn, &state));

	mmap_write_unlock(mm);

	if (state.global_error) {
		/* Write back errors in second pass. */
		state.user_gfn = (xen_pfn_t *)m.arr;
		state.user_err = m.err;
		ret = traverse_pages_block(m.num, sizeof(xen_pfn_t),
					   &pagelist, mmap_return_errors, &state);
	} else
		ret = 0;

	/* If we have not had any EFAULT-like global errors then set the global
	 * error to -ENOENT if necessary. */
	if ((ret == 0) && (state.global_error == -ENOENT))
		ret = -ENOENT;

out:
	free_page_list(&pagelist);
	return ret;

out_unlock:
	mmap_write_unlock(mm);
	goto out;
}

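/*
 * Pin the user pages backing each dm_op buffer so they stay resident
 * while the (preemptible) hypercall is in flight.  On return *pinned
 * reflects how many pages were actually pinned, even on failure, so the
 * caller can undo the work with unlock_pages().
 */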
static int lock_pages(
	struct privcmd_dm_op_buf kbufs[], unsigned int num,
	struct page *pages[], unsigned int nr_pages, unsigned int *pinned)
{
	unsigned int i, off = 0;

	for (i = 0; i < num; ) {
		unsigned int requested;
		int page_count;

		requested = DIV_ROUND_UP(
			offset_in_page(kbufs[i].uptr) + kbufs[i].size,
			PAGE_SIZE) - off;
		if (requested > nr_pages)
			return -ENOSPC;

		page_count = pin_user_pages_fast(
			(unsigned long)kbufs[i].uptr + off * PAGE_SIZE,
			requested, FOLL_WRITE, pages);
		if (page_count <= 0)
			return page_count ? : -EFAULT;

		*pinned += page_count;
		nr_pages -= page_count;
		pages += page_count;

		off = (requested == page_count) ? 0 : off + page_count;
		i += !off;
	}

	return 0;
}

static void unlock_pages(struct page *pages[], unsigned int nr_pages)
{
	unpin_user_pages_dirty_lock(pages, nr_pages, true);
}

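/*
 * Issue a device-model (dm_op) hypercall on behalf of userspace: copy in
 * the buffer descriptors, validate them against the module limits, pin
 * the backing pages and forward the operation to the hypervisor.
 */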
static long privcmd_ioctl_dm_op(struct file *file, void __user *udata)
{
	struct privcmd_data *data = file->private_data;
	struct privcmd_dm_op kdata;
	struct privcmd_dm_op_buf *kbufs;
	unsigned int nr_pages = 0;
	struct page **pages = NULL;
	struct xen_dm_op_buf *xbufs = NULL;
	unsigned int i;
	long rc;
	unsigned int pinned = 0;

	if (copy_from_user(&kdata, udata, sizeof(kdata)))
		return -EFAULT;

	/* If restriction is in place, check the domid matches */
	if (data->domid != DOMID_INVALID && data->domid != kdata.dom)
		return -EPERM;

	if (kdata.num == 0)
		return 0;

	if (kdata.num > privcmd_dm_op_max_num)
		return -E2BIG;

	kbufs = kcalloc(kdata.num, sizeof(*kbufs), GFP_KERNEL);
	if (!kbufs)
		return -ENOMEM;

	if (copy_from_user(kbufs, kdata.ubufs,
			   sizeof(*kbufs) * kdata.num)) {
		rc = -EFAULT;
		goto out;
	}

	for (i = 0; i < kdata.num; i++) {
		if (kbufs[i].size > privcmd_dm_op_buf_max_size) {
			rc = -E2BIG;
			goto out;
		}

		if (!access_ok(kbufs[i].uptr,
			       kbufs[i].size)) {
			rc = -EFAULT;
			goto out;
		}

		nr_pages += DIV_ROUND_UP(
			offset_in_page(kbufs[i].uptr) + kbufs[i].size,
			PAGE_SIZE);
	}

	pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
	if (!pages) {
		rc = -ENOMEM;
		goto out;
	}

	xbufs = kcalloc(kdata.num, sizeof(*xbufs), GFP_KERNEL);
	if (!xbufs) {
		rc = -ENOMEM;
		goto out;
	}

	rc = lock_pages(kbufs, kdata.num, pages, nr_pages, &pinned);
	if (rc < 0)
		goto out;

	for (i = 0; i < kdata.num; i++) {
		set_xen_guest_handle(xbufs[i].h, kbufs[i].uptr);
		xbufs[i].size = kbufs[i].size;
	}

	xen_preemptible_hcall_begin();
	rc = HYPERVISOR_dm_op(kdata.dom, kdata.num, xbufs);
	xen_preemptible_hcall_end();

out:
	unlock_pages(pages, pinned);
	kfree(xbufs);
	kfree(pages);
	kfree(kbufs);

	return rc;
}

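/*
 * Restrict this file handle to a single domain.  Once set, subsequent
 * ioctls on the handle may only target that domain, and arbitrary
 * hypercalls are refused.
 */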
static long privcmd_ioctl_restrict(struct file *file, void __user *udata)
{
	struct privcmd_data *data = file->private_data;
	domid_t dom;

	if (copy_from_user(&dom, udata, sizeof(dom)))
		return -EFAULT;

	/* Set restriction to the specified domain, or check it matches */
	if (data->domid == DOMID_INVALID)
		data->domid = dom;
	else if (data->domid != dom)
		return -EINVAL;

	return 0;
}

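/*
 * Map a guest resource (acquired via XENMEM_acquire_resource) into the
 * caller's privcmd VMA, or, when called with addr == 0 and num == 0,
 * just query and return the size of the resource in frames.
 */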
static long privcmd_ioctl_mmap_resource(struct file *file,
				struct privcmd_mmap_resource __user *udata)
{
	struct privcmd_data *data = file->private_data;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct privcmd_mmap_resource kdata;
	xen_pfn_t *pfns = NULL;
	struct xen_mem_acquire_resource xdata = { };
	int rc;

	if (copy_from_user(&kdata, udata, sizeof(kdata)))
		return -EFAULT;

	/* If restriction is in place, check the domid matches */
	if (data->domid != DOMID_INVALID && data->domid != kdata.dom)
		return -EPERM;

	/* Both fields must be set or unset */
	if (!!kdata.addr != !!kdata.num)
		return -EINVAL;

	xdata.domid = kdata.dom;
	xdata.type = kdata.type;
	xdata.id = kdata.id;

	if (!kdata.addr && !kdata.num) {
		/* Query the size of the resource. */
		rc = HYPERVISOR_memory_op(XENMEM_acquire_resource, &xdata);
		if (rc)
			return rc;
		return __put_user(xdata.nr_frames, &udata->num);
	}

	mmap_write_lock(mm);

	vma = find_vma(mm, kdata.addr);
	if (!vma || vma->vm_ops != &privcmd_vm_ops) {
		rc = -EINVAL;
		goto out;
	}

	pfns = kcalloc(kdata.num, sizeof(*pfns), GFP_KERNEL | __GFP_NOWARN);
	if (!pfns) {
		rc = -ENOMEM;
		goto out;
	}

	if (IS_ENABLED(CONFIG_XEN_AUTO_XLATE) &&
	    xen_feature(XENFEAT_auto_translated_physmap)) {
		unsigned int nr = DIV_ROUND_UP(kdata.num, XEN_PFN_PER_PAGE);
		struct page **pages;
		unsigned int i;

		rc = alloc_empty_pages(vma, nr);
		if (rc < 0)
			goto out;

		pages = vma->vm_private_data;

		for (i = 0; i < kdata.num; i++) {
			xen_pfn_t pfn =
				page_to_xen_pfn(pages[i / XEN_PFN_PER_PAGE]);

			pfns[i] = pfn + (i % XEN_PFN_PER_PAGE);
		}
	} else
		vma->vm_private_data = PRIV_VMA_LOCKED;

	xdata.frame = kdata.idx;
	xdata.nr_frames = kdata.num;
	set_xen_guest_handle(xdata.frame_list, pfns);

	xen_preemptible_hcall_begin();
	rc = HYPERVISOR_memory_op(XENMEM_acquire_resource, &xdata);
	xen_preemptible_hcall_end();

	if (rc)
		goto out;

	if (IS_ENABLED(CONFIG_XEN_AUTO_XLATE) &&
	    xen_feature(XENFEAT_auto_translated_physmap)) {
		rc = xen_remap_vma_range(vma, kdata.addr, kdata.num << PAGE_SHIFT);
	} else {
		unsigned int domid =
			(xdata.flags & XENMEM_rsrc_acq_caller_owned) ?
			DOMID_SELF : kdata.dom;
		int num, *errs = (int *)pfns;

		BUILD_BUG_ON(sizeof(*errs) > sizeof(*pfns));
		num = xen_remap_domain_mfn_array(vma,
						 kdata.addr & PAGE_MASK,
						 pfns, kdata.num, errs,
						 vma->vm_page_prot,
						 domid);
		if (num < 0)
			rc = num;
		else if (num != kdata.num) {
			unsigned int i;

			for (i = 0; i < num; i++) {
				rc = errs[i];
				if (rc < 0)
					break;
			}
		} else
			rc = 0;
	}

out:
	mmap_write_unlock(mm);
	kfree(pfns);

	return rc;
}

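/*
 * Look up the ACPI GSI for a PCI device (identified by its SBDF) so that
 * userspace toolstacks can map the correct pirq, rather than relying on
 * the unrelated Linux irq number exposed in sysfs.
 */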
static long privcmd_ioctl_pcidev_get_gsi(struct file *file, void __user *udata)
{
#if defined(CONFIG_XEN_ACPI)
	int rc;
	struct privcmd_pcidev_get_gsi kdata;

	if (copy_from_user(&kdata, udata, sizeof(kdata)))
		return -EFAULT;

	rc = xen_acpi_get_gsi_from_sbdf(kdata.sbdf);
	if (rc < 0)
		return rc;

	kdata.gsi = rc;
	if (copy_to_user(udata, &kdata, sizeof(kdata)))
		return -EFAULT;

	return 0;
#else
	return -EINVAL;
#endif
}

#ifdef CONFIG_XEN_PRIVCMD_EVENTFD
/* Irqfd support */
static struct workqueue_struct *irqfd_cleanup_wq;
static DEFINE_SPINLOCK(irqfds_lock);
DEFINE_STATIC_SRCU(irqfds_srcu);
static LIST_HEAD(irqfds_list);

struct privcmd_kernel_irqfd {
	struct xen_dm_op_buf xbufs;
	domid_t dom;
	bool error;
	struct eventfd_ctx *eventfd;
	struct work_struct shutdown;
	wait_queue_entry_t wait;
	struct list_head list;
	poll_table pt;
};

static void irqfd_deactivate(struct privcmd_kernel_irqfd *kirqfd)
{
	lockdep_assert_held(&irqfds_lock);

	list_del_init(&kirqfd->list);
	queue_work(irqfd_cleanup_wq, &kirqfd->shutdown);
}

static void irqfd_shutdown(struct work_struct *work)
{
	struct privcmd_kernel_irqfd *kirqfd =
		container_of(work, struct privcmd_kernel_irqfd, shutdown);
	u64 cnt;

	/* Make sure irqfd has been initialized in assign path */
	synchronize_srcu(&irqfds_srcu);

	eventfd_ctx_remove_wait_queue(kirqfd->eventfd, &kirqfd->wait, &cnt);
	eventfd_ctx_put(kirqfd->eventfd);
	kfree(kirqfd);
}

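/*
 * Consume the eventfd count and forward the pre-registered dm_op to the
 * hypervisor; this is how a signalled irqfd delivers its interrupt to
 * the guest domain.
 */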
static void irqfd_inject(struct privcmd_kernel_irqfd *kirqfd)
{
	u64 cnt;
	long rc;

	eventfd_ctx_do_read(kirqfd->eventfd, &cnt);

	xen_preemptible_hcall_begin();
	rc = HYPERVISOR_dm_op(kirqfd->dom, 1, &kirqfd->xbufs);
	xen_preemptible_hcall_end();

	/* Don't repeat the error message for consecutive failures */
	if (rc && !kirqfd->error) {
		pr_err("Failed to configure irq for guest domain: %d\n",
		       kirqfd->dom);
	}

	kirqfd->error = rc;
}

static int
irqfd_wakeup(wait_queue_entry_t *wait, unsigned int mode, int sync, void *key)
{
	struct privcmd_kernel_irqfd *kirqfd =
		container_of(wait, struct privcmd_kernel_irqfd, wait);
	__poll_t flags = key_to_poll(key);

	if (flags & EPOLLIN)
		irqfd_inject(kirqfd);

	if (flags & EPOLLHUP) {
		unsigned long flags;

		spin_lock_irqsave(&irqfds_lock, flags);
		irqfd_deactivate(kirqfd);
		spin_unlock_irqrestore(&irqfds_lock, flags);
	}

	return 0;
}

static void
irqfd_poll_func(struct file *file, wait_queue_head_t *wqh, poll_table *pt)
{
	struct privcmd_kernel_irqfd *kirqfd =
		container_of(pt, struct privcmd_kernel_irqfd, pt);

	add_wait_queue_priority(wqh, &kirqfd->wait);
}

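/*
 * Register an irqfd: copy in the dm_op that should run whenever the
 * eventfd fires, hook our wait-queue callback into the eventfd's poll
 * path, and add the new entry to irqfds_list.
 */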
static int privcmd_irqfd_assign(struct privcmd_irqfd *irqfd)
{
	struct privcmd_kernel_irqfd *kirqfd, *tmp;
	unsigned long flags;
	__poll_t events;
	struct fd f;
	void *dm_op;
	int ret, idx;

	kirqfd = kzalloc(sizeof(*kirqfd) + irqfd->size, GFP_KERNEL);
	if (!kirqfd)
		return -ENOMEM;
	dm_op = kirqfd + 1;

	if (copy_from_user(dm_op, u64_to_user_ptr(irqfd->dm_op), irqfd->size)) {
		ret = -EFAULT;
		goto error_kfree;
	}

	kirqfd->xbufs.size = irqfd->size;
	set_xen_guest_handle(kirqfd->xbufs.h, dm_op);
	kirqfd->dom = irqfd->dom;
	INIT_WORK(&kirqfd->shutdown, irqfd_shutdown);

	f = fdget(irqfd->fd);
	if (!fd_file(f)) {
		ret = -EBADF;
		goto error_kfree;
	}

	kirqfd->eventfd = eventfd_ctx_fileget(fd_file(f));
	if (IS_ERR(kirqfd->eventfd)) {
		ret = PTR_ERR(kirqfd->eventfd);
		goto error_fd_put;
	}

	/*
	 * Install our own custom wake-up handling so we are notified via a
	 * callback whenever someone signals the underlying eventfd.
	 */
	init_waitqueue_func_entry(&kirqfd->wait, irqfd_wakeup);
	init_poll_funcptr(&kirqfd->pt, irqfd_poll_func);

	spin_lock_irqsave(&irqfds_lock, flags);

	list_for_each_entry(tmp, &irqfds_list, list) {
		if (kirqfd->eventfd == tmp->eventfd) {
			ret = -EBUSY;
			spin_unlock_irqrestore(&irqfds_lock, flags);
			goto error_eventfd;
		}
	}

	idx = srcu_read_lock(&irqfds_srcu);
	list_add_tail(&kirqfd->list, &irqfds_list);
	spin_unlock_irqrestore(&irqfds_lock, flags);

	/*
	 * Check if there was an event already pending on the eventfd before we
	 * registered, and trigger it as if we didn't miss it.
	 */
	events = vfs_poll(fd_file(f), &kirqfd->pt);
	if (events & EPOLLIN)
		irqfd_inject(kirqfd);

	srcu_read_unlock(&irqfds_srcu, idx);

	/*
	 * Do not drop the file until the kirqfd is fully initialized, otherwise
	 * we might race against the EPOLLHUP.
	 */
	fdput(f);
	return 0;

error_eventfd:
	eventfd_ctx_put(kirqfd->eventfd);

error_fd_put:
	fdput(f);

error_kfree:
	kfree(kirqfd);
	return ret;
}

static int privcmd_irqfd_deassign(struct privcmd_irqfd *irqfd)
{
	struct privcmd_kernel_irqfd *kirqfd;
	struct eventfd_ctx *eventfd;
	unsigned long flags;

	eventfd = eventfd_ctx_fdget(irqfd->fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	spin_lock_irqsave(&irqfds_lock, flags);

	list_for_each_entry(kirqfd, &irqfds_list, list) {
		if (kirqfd->eventfd == eventfd) {
			irqfd_deactivate(kirqfd);
			break;
		}
	}

	spin_unlock_irqrestore(&irqfds_lock, flags);

	eventfd_ctx_put(eventfd);

	/*
	 * Block until we know all outstanding shutdown jobs have completed so
	 * that we guarantee there will not be any more interrupts once this
	 * deassign function returns.
	 */
	flush_workqueue(irqfd_cleanup_wq);

	return 0;
}

static long privcmd_ioctl_irqfd(struct file *file, void __user *udata)
{
	struct privcmd_data *data = file->private_data;
	struct privcmd_irqfd irqfd;

	if (copy_from_user(&irqfd, udata, sizeof(irqfd)))
		return -EFAULT;

	/* No other flags should be set */
	if (irqfd.flags & ~PRIVCMD_IRQFD_FLAG_DEASSIGN)
		return -EINVAL;

	/* If restriction is in place, check the domid matches */
	if (data->domid != DOMID_INVALID && data->domid != irqfd.dom)
		return -EPERM;

	if (irqfd.flags & PRIVCMD_IRQFD_FLAG_DEASSIGN)
		return privcmd_irqfd_deassign(&irqfd);

	return privcmd_irqfd_assign(&irqfd);
}

static int privcmd_irqfd_init(void)
{
	irqfd_cleanup_wq = alloc_workqueue("privcmd-irqfd-cleanup", 0, 0);
	if (!irqfd_cleanup_wq)
		return -ENOMEM;

	return 0;
}

static void privcmd_irqfd_exit(void)
{
	struct privcmd_kernel_irqfd *kirqfd, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&irqfds_lock, flags);

	list_for_each_entry_safe(kirqfd, tmp, &irqfds_list, list)
		irqfd_deactivate(kirqfd);

	spin_unlock_irqrestore(&irqfds_lock, flags);

	destroy_workqueue(irqfd_cleanup_wq);
}

/* Ioeventfd Support */
#define QUEUE_NOTIFY_VQ_MASK 0xFFFF

static DEFINE_MUTEX(ioreq_lock);
static LIST_HEAD(ioreq_list);

/* per-eventfd structure */
struct privcmd_kernel_ioeventfd {
	struct eventfd_ctx *eventfd;
	struct list_head list;
	u64 addr;
	unsigned int addr_len;
	unsigned int vq;
};

/* per-guest CPU / port structure */
struct ioreq_port {
	int vcpu;
	unsigned int port;
	struct privcmd_kernel_ioreq *kioreq;
};

/* per-guest structure */
struct privcmd_kernel_ioreq {
	domid_t dom;
	unsigned int vcpus;
	u64 uioreq;
	struct ioreq *ioreq;
	spinlock_t lock; /* Protects ioeventfds list */
	struct list_head ioeventfds;
	struct list_head list;
	struct ioreq_port ports[] __counted_by(vcpus);
};

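/*
 * Event-channel interrupt handler: when the guest writes a virtio-mmio
 * QUEUE_NOTIFY matching a registered (address, length, virtqueue) tuple,
 * signal the corresponding eventfd and complete the ioreq in the kernel.
 */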
static irqreturn_t ioeventfd_interrupt(int irq, void *dev_id)
{
	struct ioreq_port *port = dev_id;
	struct privcmd_kernel_ioreq *kioreq = port->kioreq;
	struct ioreq *ioreq = &kioreq->ioreq[port->vcpu];
	struct privcmd_kernel_ioeventfd *kioeventfd;
	unsigned int state = STATE_IOREQ_READY;

	if (ioreq->state != STATE_IOREQ_READY ||
	    ioreq->type != IOREQ_TYPE_COPY || ioreq->dir != IOREQ_WRITE)
		return IRQ_NONE;

	/*
	 * We need a barrier, smp_mb(), here to ensure reads are finished before
	 * `state` is updated. Since the lock implementation ensures that
	 * appropriate barrier will be added anyway, we can avoid adding
	 * explicit barrier here.
	 *
	 * Ideally we don't need to update `state` within the locks, but we do
	 * that here to avoid adding explicit barrier.
	 */

	spin_lock(&kioreq->lock);
	ioreq->state = STATE_IOREQ_INPROCESS;

	list_for_each_entry(kioeventfd, &kioreq->ioeventfds, list) {
		if (ioreq->addr == kioeventfd->addr + VIRTIO_MMIO_QUEUE_NOTIFY &&
		    ioreq->size == kioeventfd->addr_len &&
		    (ioreq->data & QUEUE_NOTIFY_VQ_MASK) == kioeventfd->vq) {
			eventfd_signal(kioeventfd->eventfd);
			state = STATE_IORESP_READY;
			break;
		}
	}
	spin_unlock(&kioreq->lock);

	/*
	 * We need a barrier, smp_mb(), here to ensure writes are finished
	 * before `state` is updated. Since the lock implementation ensures that
	 * appropriate barrier will be added anyway, we can avoid adding
	 * explicit barrier here.
	 */

	ioreq->state = state;

	if (state == STATE_IORESP_READY) {
		notify_remote_via_evtchn(port->port);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

static void ioreq_free(struct privcmd_kernel_ioreq *kioreq)
{
	struct ioreq_port *ports = kioreq->ports;
	int i;

	lockdep_assert_held(&ioreq_lock);

	list_del(&kioreq->list);

	for (i = kioreq->vcpus - 1; i >= 0; i--)
		unbind_from_irqhandler(irq_from_evtchn(ports[i].port), &ports[i]);

	kfree(kioreq);
}

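/*
 * Allocate and register a per-guest ioreq tracking structure: locate the
 * already-mapped ioreq server page in the caller's address space, copy in
 * the per-vCPU event-channel ports and bind an interrupt handler to each.
 */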
static
struct privcmd_kernel_ioreq *alloc_ioreq(struct privcmd_ioeventfd *ioeventfd)
{
	struct privcmd_kernel_ioreq *kioreq;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct page **pages;
	unsigned int *ports;
	int ret, size, i;

	lockdep_assert_held(&ioreq_lock);

	size = struct_size(kioreq, ports, ioeventfd->vcpus);
	kioreq = kzalloc(size, GFP_KERNEL);
	if (!kioreq)
		return ERR_PTR(-ENOMEM);

	kioreq->dom = ioeventfd->dom;
	kioreq->vcpus = ioeventfd->vcpus;
	kioreq->uioreq = ioeventfd->ioreq;
	spin_lock_init(&kioreq->lock);
	INIT_LIST_HEAD(&kioreq->ioeventfds);

	/* The memory for ioreq server must have been mapped earlier */
	mmap_write_lock(mm);
	vma = find_vma(mm, (unsigned long)ioeventfd->ioreq);
	if (!vma) {
		pr_err("Failed to find vma for ioreq page!\n");
		mmap_write_unlock(mm);
		ret = -EFAULT;
		goto error_kfree;
	}

	pages = vma->vm_private_data;
	kioreq->ioreq = (struct ioreq *)(page_to_virt(pages[0]));
	mmap_write_unlock(mm);

	ports = memdup_array_user(u64_to_user_ptr(ioeventfd->ports),
				  kioreq->vcpus, sizeof(*ports));
	if (IS_ERR(ports)) {
		ret = PTR_ERR(ports);
		goto error_kfree;
	}

	for (i = 0; i < kioreq->vcpus; i++) {
		kioreq->ports[i].vcpu = i;
		kioreq->ports[i].port = ports[i];
		kioreq->ports[i].kioreq = kioreq;

		ret = bind_evtchn_to_irqhandler_lateeoi(ports[i],
				ioeventfd_interrupt, IRQF_SHARED, "ioeventfd",
				&kioreq->ports[i]);
		if (ret < 0)
			goto error_unbind;
	}

	kfree(ports);

	list_add_tail(&kioreq->list, &ioreq_list);

	return kioreq;

error_unbind:
	while (--i >= 0)
		unbind_from_irqhandler(irq_from_evtchn(ports[i]), &kioreq->ports[i]);

	kfree(ports);
error_kfree:
	kfree(kioreq);
	return ERR_PTR(ret);
}

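/*
 * Find the existing kioreq for this guest's ioreq page, verifying that
 * the caller's dom/vcpus match and that the eventfd is not already
 * registered; allocate a fresh kioreq if none exists yet.
 */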
static struct privcmd_kernel_ioreq *
get_ioreq(struct privcmd_ioeventfd *ioeventfd, struct eventfd_ctx *eventfd)
{
	struct privcmd_kernel_ioreq *kioreq;
	unsigned long flags;

	list_for_each_entry(kioreq, &ioreq_list, list) {
		struct privcmd_kernel_ioeventfd *kioeventfd;

		/*
		 * kioreq fields can be accessed here without a lock as they are
		 * never updated after being added to the ioreq_list.
		 */
		if (kioreq->uioreq != ioeventfd->ioreq) {
			continue;
		} else if (kioreq->dom != ioeventfd->dom ||
			   kioreq->vcpus != ioeventfd->vcpus) {
			pr_err("Invalid ioeventfd configuration mismatch, dom (%u vs %u), vcpus (%u vs %u)\n",
			       kioreq->dom, ioeventfd->dom, kioreq->vcpus,
			       ioeventfd->vcpus);
			return ERR_PTR(-EINVAL);
		}

		/* Look for a duplicate eventfd for the same guest */
		spin_lock_irqsave(&kioreq->lock, flags);
		list_for_each_entry(kioeventfd, &kioreq->ioeventfds, list) {
			if (eventfd == kioeventfd->eventfd) {
				spin_unlock_irqrestore(&kioreq->lock, flags);
				return ERR_PTR(-EBUSY);
			}
		}
		spin_unlock_irqrestore(&kioreq->lock, flags);

		return kioreq;
	}

	/* Matching kioreq isn't found, allocate a new one */
	return alloc_ioreq(ioeventfd);
}

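/* Callers hold kioreq->lock while the ioeventfd is unlinked and freed. */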
static void ioeventfd_free(struct privcmd_kernel_ioeventfd *kioeventfd)
{
	list_del(&kioeventfd->list);
	eventfd_ctx_put(kioeventfd->eventfd);
	kfree(kioeventfd);
}

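/*
 * Assign an ioeventfd: validate the MMIO range (vhost-style lengths of
 * 1, 2, 4 or 8 bytes), take a reference on the eventfd and attach it to
 * the kioreq describing the guest's ioreq server page.
 */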
static int privcmd_ioeventfd_assign(struct privcmd_ioeventfd *ioeventfd)
{
	struct privcmd_kernel_ioeventfd *kioeventfd;
	struct privcmd_kernel_ioreq *kioreq;
	unsigned long flags;
	struct fd f;
	int ret;

	/* Check for range overflow */
	if (ioeventfd->addr + ioeventfd->addr_len < ioeventfd->addr)
		return -EINVAL;

	/* Vhost requires us to support length 1, 2, 4, and 8 */
	if (!(ioeventfd->addr_len == 1 || ioeventfd->addr_len == 2 ||
	      ioeventfd->addr_len == 4 || ioeventfd->addr_len == 8))
		return -EINVAL;

	/* 4096 vcpus limit enough ? */
	if (!ioeventfd->vcpus || ioeventfd->vcpus > 4096)
		return -EINVAL;

	kioeventfd = kzalloc(sizeof(*kioeventfd), GFP_KERNEL);
	if (!kioeventfd)
		return -ENOMEM;

	f = fdget(ioeventfd->event_fd);
	if (!fd_file(f)) {
		ret = -EBADF;
		goto error_kfree;
	}

	kioeventfd->eventfd = eventfd_ctx_fileget(fd_file(f));
	fdput(f);

	if (IS_ERR(kioeventfd->eventfd)) {
		ret = PTR_ERR(kioeventfd->eventfd);
		goto error_kfree;
	}

	kioeventfd->addr = ioeventfd->addr;
	kioeventfd->addr_len = ioeventfd->addr_len;
	kioeventfd->vq = ioeventfd->vq;

	mutex_lock(&ioreq_lock);
	kioreq = get_ioreq(ioeventfd, kioeventfd->eventfd);
	if (IS_ERR(kioreq)) {
		mutex_unlock(&ioreq_lock);
		ret = PTR_ERR(kioreq);
		goto error_eventfd;
	}

	spin_lock_irqsave(&kioreq->lock, flags);
	list_add_tail(&kioeventfd->list, &kioreq->ioeventfds);
	spin_unlock_irqrestore(&kioreq->lock, flags);

	mutex_unlock(&ioreq_lock);

	return 0;

error_eventfd:
	eventfd_ctx_put(kioeventfd->eventfd);

error_kfree:
	kfree(kioeventfd);
	return ret;
}

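/*
 * Deassign an ioeventfd: find the matching kioreq/eventfd pair, free it,
 * and drop the kioreq as well once its last ioeventfd is gone.
 */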
static int privcmd_ioeventfd_deassign(struct privcmd_ioeventfd *ioeventfd)
{
	struct privcmd_kernel_ioreq *kioreq, *tkioreq;
	struct eventfd_ctx *eventfd;
	unsigned long flags;
	int ret = 0;

	eventfd = eventfd_ctx_fdget(ioeventfd->event_fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	mutex_lock(&ioreq_lock);
	list_for_each_entry_safe(kioreq, tkioreq, &ioreq_list, list) {
		struct privcmd_kernel_ioeventfd *kioeventfd, *tmp;
		/*
		 * kioreq fields can be accessed here without a lock as they are
		 * never updated after being added to the ioreq_list.
		 */
		if (kioreq->dom != ioeventfd->dom ||
		    kioreq->uioreq != ioeventfd->ioreq ||
		    kioreq->vcpus != ioeventfd->vcpus)
			continue;

		spin_lock_irqsave(&kioreq->lock, flags);
		list_for_each_entry_safe(kioeventfd, tmp, &kioreq->ioeventfds, list) {
			if (eventfd == kioeventfd->eventfd) {
				ioeventfd_free(kioeventfd);
				spin_unlock_irqrestore(&kioreq->lock, flags);

				if (list_empty(&kioreq->ioeventfds))
					ioreq_free(kioreq);
				goto unlock;
			}
		}
		spin_unlock_irqrestore(&kioreq->lock, flags);
		break;
	}

	pr_err("Ioeventfd isn't already assigned, dom: %u, addr: %llu\n",
	       ioeventfd->dom, ioeventfd->addr);
	ret = -ENODEV;

unlock:
	mutex_unlock(&ioreq_lock);
	eventfd_ctx_put(eventfd);

	return ret;
}

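/*
 * IOCTL_PRIVCMD_IOEVENTFD handler: copy the request from user space,
 * reject unknown flags, enforce any domain restriction on this file and
 * dispatch to the assign or deassign path.
 */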
static long privcmd_ioctl_ioeventfd(struct file *file, void __user *udata)
{
	struct privcmd_data *data = file->private_data;
	struct privcmd_ioeventfd ioeventfd;

	if (copy_from_user(&ioeventfd, udata, sizeof(ioeventfd)))
		return -EFAULT;

	/* No other flags should be set */
	if (ioeventfd.flags & ~PRIVCMD_IOEVENTFD_FLAG_DEASSIGN)
		return -EINVAL;

	/* If restriction is in place, check the domid matches */
	if (data->domid != DOMID_INVALID && data->domid != ioeventfd.dom)
		return -EPERM;

	if (ioeventfd.flags & PRIVCMD_IOEVENTFD_FLAG_DEASSIGN)
		return privcmd_ioeventfd_deassign(&ioeventfd);

	return privcmd_ioeventfd_assign(&ioeventfd);
}

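/*
 * Rough userspace usage sketch (illustrative only, not taken from the
 * in-tree documentation; the exact field layout lives in the privcmd
 * uapi header):
 *
 *	struct privcmd_ioeventfd req = {
 *		.dom      = domid,             // guest domain
 *		.ioreq    = ioreq_page,        // ioreq server page mapped via privcmd
 *		.ports    = (uint64_t)ports,   // per-vCPU event channel ports
 *		.vcpus    = nr_vcpus,
 *		.addr     = mmio_addr,         // MMIO address to watch
 *		.addr_len = 4,
 *		.vq       = vq_index,
 *		.event_fd = eventfd(0, EFD_CLOEXEC),
 *	};
 *	ioctl(privcmd_fd, IOCTL_PRIVCMD_IOEVENTFD, &req);
 *
 * Passing PRIVCMD_IOEVENTFD_FLAG_DEASSIGN in req.flags removes a
 * previously assigned ioeventfd again.
 */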
static void privcmd_ioeventfd_exit(void)
{
	struct privcmd_kernel_ioreq *kioreq, *tmp;
	unsigned long flags;

	mutex_lock(&ioreq_lock);
	list_for_each_entry_safe(kioreq, tmp, &ioreq_list, list) {
		struct privcmd_kernel_ioeventfd *kioeventfd, *tmp;

		spin_lock_irqsave(&kioreq->lock, flags);
		list_for_each_entry_safe(kioeventfd, tmp, &kioreq->ioeventfds, list)
			ioeventfd_free(kioeventfd);
		spin_unlock_irqrestore(&kioreq->lock, flags);

		ioreq_free(kioreq);
	}
	mutex_unlock(&ioreq_lock);
}

#else
static inline long privcmd_ioctl_irqfd(struct file *file, void __user *udata)
{
	return -EOPNOTSUPP;
}

static inline int privcmd_irqfd_init(void)
{
	return 0;
}

static inline void privcmd_irqfd_exit(void)
{
}

static inline long privcmd_ioctl_ioeventfd(struct file *file, void __user *udata)
{
	return -EOPNOTSUPP;
}

static inline void privcmd_ioeventfd_exit(void)
{
}
#endif /* CONFIG_XEN_PRIVCMD_EVENTFD */

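/*
 * Top-level ioctl dispatcher for the privcmd device: every request is
 * routed to its dedicated handler, unknown commands return -ENOTTY.
 */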
static long privcmd_ioctl(struct file *file,
			  unsigned int cmd, unsigned long data)
{
	int ret = -ENOTTY;
	void __user *udata = (void __user *) data;

	switch (cmd) {
	case IOCTL_PRIVCMD_HYPERCALL:
		ret = privcmd_ioctl_hypercall(file, udata);
		break;

	case IOCTL_PRIVCMD_MMAP:
		ret = privcmd_ioctl_mmap(file, udata);
		break;

	case IOCTL_PRIVCMD_MMAPBATCH:
		ret = privcmd_ioctl_mmap_batch(file, udata, 1);
		break;

	case IOCTL_PRIVCMD_MMAPBATCH_V2:
		ret = privcmd_ioctl_mmap_batch(file, udata, 2);
		break;

	case IOCTL_PRIVCMD_DM_OP:
		ret = privcmd_ioctl_dm_op(file, udata);
		break;

	case IOCTL_PRIVCMD_RESTRICT:
		ret = privcmd_ioctl_restrict(file, udata);
		break;

	case IOCTL_PRIVCMD_MMAP_RESOURCE:
		ret = privcmd_ioctl_mmap_resource(file, udata);
		break;

	case IOCTL_PRIVCMD_IRQFD:
		ret = privcmd_ioctl_irqfd(file, udata);
		break;

	case IOCTL_PRIVCMD_IOEVENTFD:
		ret = privcmd_ioctl_ioeventfd(file, udata);
		break;

	case IOCTL_PRIVCMD_PCIDEV_GET_GSI:
		ret = privcmd_ioctl_pcidev_get_gsi(file, udata);
		break;

	default:
		break;
	}

	return ret;
}

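/*
 * Each open of the device gets its own privcmd_data. The domid starts
 * out as DOMID_INVALID, i.e. unrestricted, until the file is restricted
 * to a single domain (IOCTL_PRIVCMD_RESTRICT).
 */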
static int privcmd_open(struct inode *ino, struct file *file)
{
	struct privcmd_data *data = kzalloc(sizeof(*data), GFP_KERNEL);

	if (!data)
		return -ENOMEM;

	/* DOMID_INVALID implies no restriction */
	data->domid = DOMID_INVALID;

	file->private_data = data;
	return 0;
}

static int privcmd_release(struct inode *ino, struct file *file)
{
	struct privcmd_data *data = file->private_data;

	kfree(data);
	return 0;
}

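/*
 * VMA close handler: on auto-translated guests the foreign frames were
 * backed by unpopulated pages, so unmap the gfn range and give the pages
 * back before freeing the pages array.
 */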
static void privcmd_close(struct vm_area_struct *vma)
{
	struct page **pages = vma->vm_private_data;
	int numpgs = vma_pages(vma);
	int numgfns = (vma->vm_end - vma->vm_start) >> XEN_PAGE_SHIFT;
	int rc;

	if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages)
		return;

	rc = xen_unmap_domain_gfn_range(vma, numgfns, pages);
	if (rc == 0)
		xen_free_unpopulated_pages(numpgs, pages);
	else
		pr_crit("unable to unmap MFN range: leaking %d pages. rc=%d\n",
			numpgs, rc);
	kvfree(pages);
}

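/*
 * Mappings are only ever set up explicitly through the ioctls, so any
 * page fault on a privcmd VMA is unexpected and answered with SIGBUS.
 */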
static vm_fault_t privcmd_fault(struct vm_fault *vmf)
{
	printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n",
	       vmf->vma, vmf->vma->vm_start, vmf->vma->vm_end,
	       vmf->pgoff, (void *)vmf->address);

	return VM_FAULT_SIGBUS;
}

static const struct vm_operations_struct privcmd_vm_ops = {
	.close = privcmd_close,
	.fault = privcmd_fault
};

static int privcmd_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* DONTCOPY is essential for Xen because copy_page_range doesn't know
	 * how to recreate these mappings */
	vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTCOPY |
			 VM_DONTEXPAND | VM_DONTDUMP);
	vma->vm_ops = &privcmd_vm_ops;
	vma->vm_private_data = NULL;

	return 0;
}

/*
 * For MMAPBATCH*. This allows asserting the singleshot mapping
 * on a per pfn/pte basis. Mapping calls that fail with ENOENT
 * can be then retried until success.
 */
static int is_mapped_fn(pte_t *pte, unsigned long addr, void *data)
{
	return pte_none(ptep_get(pte)) ? 0 : -EBUSY;
}

static int privcmd_vma_range_is_mapped(
	   struct vm_area_struct *vma,
	   unsigned long addr,
	   unsigned long nr_pages)
{
	return apply_to_page_range(vma->vm_mm, addr, nr_pages << PAGE_SHIFT,
				   is_mapped_fn, NULL) != 0;
}

const struct file_operations xen_privcmd_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = privcmd_ioctl,
	.open = privcmd_open,
	.release = privcmd_release,
	.mmap = privcmd_mmap,
};
EXPORT_SYMBOL_GPL(xen_privcmd_fops);

static struct miscdevice privcmd_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "xen/privcmd",
	.fops = &xen_privcmd_fops,
};

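/*
 * Register the privcmd and hypercall-buf misc devices and initialise
 * irqfd support, unwinding in reverse order on failure.
 */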
static int __init privcmd_init(void)
{
	int err;

	if (!xen_domain())
		return -ENODEV;

	err = misc_register(&privcmd_dev);
	if (err != 0) {
		pr_err("Could not register Xen privcmd device\n");
		return err;
	}

	err = misc_register(&xen_privcmdbuf_dev);
	if (err != 0) {
		pr_err("Could not register Xen hypercall-buf device\n");
		goto err_privcmdbuf;
	}

	err = privcmd_irqfd_init();
	if (err != 0) {
		pr_err("irqfd init failed\n");
		goto err_irqfd;
	}

	return 0;

err_irqfd:
	misc_deregister(&xen_privcmdbuf_dev);
err_privcmdbuf:
	misc_deregister(&privcmd_dev);
	return err;
}

static void __exit privcmd_exit(void)
{
	privcmd_ioeventfd_exit();
	privcmd_irqfd_exit();
	misc_deregister(&privcmd_dev);
	misc_deregister(&xen_privcmdbuf_dev);
}

module_init(privcmd_init);
module_exit(privcmd_exit);