2019-05-19 05:08:55 -07:00
|
|
|
// SPDX-License-Identifier: GPL-2.0-only
|
2012-09-14 06:53:39 -07:00
|
|
|
#include <xen/xen.h>
|
2012-09-14 06:37:32 -07:00
|
|
|
#include <xen/events.h>
|
2012-09-13 05:06:52 -07:00
|
|
|
#include <xen/grant_table.h>
|
|
|
|
#include <xen/hvm.h>
|
2013-04-25 09:17:04 -07:00
|
|
|
#include <xen/interface/vcpu.h>
|
2012-09-14 06:53:39 -07:00
|
|
|
#include <xen/interface/xen.h>
|
|
|
|
#include <xen/interface/memory.h>
|
2012-09-13 05:06:52 -07:00
|
|
|
#include <xen/interface/hvm/params.h>
|
2012-08-08 10:20:18 -07:00
|
|
|
#include <xen/features.h>
|
2012-09-14 06:53:39 -07:00
|
|
|
#include <xen/platform_pci.h>
|
2012-09-13 05:06:52 -07:00
|
|
|
#include <xen/xenbus.h>
|
2012-10-03 04:28:26 -07:00
|
|
|
#include <xen/page.h>
|
2013-04-25 03:23:07 -07:00
|
|
|
#include <xen/interface/sched.h>
|
2012-10-03 08:37:09 -07:00
|
|
|
#include <xen/xen-ops.h>
|
2012-09-14 06:53:39 -07:00
|
|
|
#include <asm/xen/hypervisor.h>
|
|
|
|
#include <asm/xen/hypercall.h>
|
2013-04-25 03:23:07 -07:00
|
|
|
#include <asm/system_misc.h>
|
2016-05-12 05:19:54 -07:00
|
|
|
#include <asm/efi.h>
|
2012-09-14 06:37:32 -07:00
|
|
|
#include <linux/interrupt.h>
|
|
|
|
#include <linux/irqreturn.h>
|
2012-09-14 06:53:39 -07:00
|
|
|
#include <linux/module.h>
|
2012-09-14 03:47:52 -07:00
|
|
|
#include <linux/of.h>
|
2016-04-07 05:03:28 -07:00
|
|
|
#include <linux/of_fdt.h>
|
2012-09-14 03:47:52 -07:00
|
|
|
#include <linux/of_irq.h>
|
|
|
|
#include <linux/of_address.h>
|
2013-09-09 04:35:26 -07:00
|
|
|
#include <linux/cpuidle.h>
|
|
|
|
#include <linux/cpufreq.h>
|
2014-01-30 05:52:59 -07:00
|
|
|
#include <linux/cpu.h>
|
2015-05-06 07:14:22 -07:00
|
|
|
#include <linux/console.h>
|
2015-11-23 03:41:12 -07:00
|
|
|
#include <linux/pvclock_gtod.h>
|
2021-06-04 07:07:33 -07:00
|
|
|
#include <linux/reboot.h>
|
2015-11-23 03:41:12 -07:00
|
|
|
#include <linux/time64.h>
|
2015-11-23 03:40:12 -07:00
|
|
|
#include <linux/timekeeping.h>
|
2015-11-23 03:41:12 -07:00
|
|
|
#include <linux/timekeeper_internal.h>
|
2016-04-07 05:03:27 -07:00
|
|
|
#include <linux/acpi.h>
|
2022-06-21 23:38:38 -07:00
|
|
|
#include <linux/virtio_anchor.h>
|
2012-09-14 06:53:39 -07:00
|
|
|
|
2012-10-03 08:37:09 -07:00
|
|
|
#include <linux/mm.h>
|
|
|
|
|
2020-04-15 01:48:53 -07:00
|
|
|
/* Backing storage for xen_start_info; ARM guests get no real start_info page. */
static struct start_info _xen_start_info;
struct start_info *xen_start_info = &_xen_start_info;
EXPORT_SYMBOL(xen_start_info);

/* XEN_NATIVE until xen_early_init() detects a hypervisor node in the DT. */
enum xen_domain_type xen_domain_type = XEN_NATIVE;
EXPORT_SYMBOL(xen_domain_type);

/* Placeholder until xen_guest_init() maps the real shared info page. */
struct shared_info xen_dummy_shared_info;
struct shared_info *HYPERVISOR_shared_info = (void *)&xen_dummy_shared_info;

/* Per-CPU pointer to this CPU's registered vcpu_info (set in xen_starting_cpu). */
DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
/* Per-CPU vcpu_info backing storage, allocated in xen_guest_init(). */
static struct vcpu_info __percpu *xen_vcpu_info;

/* Linux <-> Xen vCPU id mapping */
DEFINE_PER_CPU(uint32_t, xen_vcpu_id);
EXPORT_PER_CPU_SYMBOL(xen_vcpu_id);

/* These are unused until we support booting "pre-ballooned" */
unsigned long xen_released_pages;
struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;

/* Event channel upcall IRQ, discovered via ACPI or DT in xen_guest_init(). */
static __read_mostly unsigned int xen_events_irq;
/* Physical base of the grant table region from the DT (0 if none found). */
static __read_mostly phys_addr_t xen_grant_frames;

/* DT "reg" entry indices on the hypervisor node. */
#define GRANT_TABLE_INDEX 0
#define EXT_REGION_INDEX  1

uint32_t xen_start_flags;
EXPORT_SYMBOL(xen_start_flags);
|
|
|
|
|
2015-08-07 09:34:41 -07:00
|
|
|
/*
 * Unmap a range of foreign frames previously mapped into a VMA.
 * Thin wrapper delegating to the xlate (auto-translated guest) helper.
 */
int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
			       int nr, struct page **pages)
{
	return xen_xlate_unmap_gfn_range(vma, nr, pages);
}
EXPORT_SYMBOL_GPL(xen_unmap_domain_gfn_range);
|
2012-10-03 08:37:09 -07:00
|
|
|
|
2015-11-23 03:40:12 -07:00
|
|
|
/*
 * Compute the current wall-clock time in *ts: read the boot-time wall clock
 * published by the hypervisor in the shared info page, then add the
 * monotonic time elapsed since boot.
 */
static void xen_read_wallclock(struct timespec64 *ts)
{
	u32 version;
	struct timespec64 now, ts_monotonic;
	struct shared_info *s = HYPERVISOR_shared_info;
	struct pvclock_wall_clock *wall_clock = &(s->wc);

	/* get wallclock at system boot */
	do {
		version = wall_clock->version;
		rmb(); /* fetch version before time */
		now.tv_sec = ((uint64_t)wall_clock->sec_hi << 32) | wall_clock->sec;
		now.tv_nsec = wall_clock->nsec;
		rmb(); /* fetch time before checking version */
		/*
		 * Retry while the hypervisor is mid-update (odd version) or
		 * the version changed underneath us (seqcount-style read).
		 */
	} while ((wall_clock->version & 1) || (version != wall_clock->version));

	/* time since system boot */
	ktime_get_ts64(&ts_monotonic);
	*ts = timespec64_add(now, ts_monotonic);
}
|
|
|
|
|
2015-11-23 03:41:12 -07:00
|
|
|
/*
 * pvclock_gtod notifier callback: push the kernel's wall-clock time to the
 * hypervisor via XENPF_settime64, either immediately when the clock was set
 * or at most every 11 minutes otherwise (mirroring the RTC sync cadence).
 * Registered only for the initial domain (see xen_guest_init()).
 */
static int xen_pvclock_gtod_notify(struct notifier_block *nb,
				   unsigned long was_set, void *priv)
{
	/* Protected by the calling core code serialization */
	static struct timespec64 next_sync;

	struct xen_platform_op op;
	struct timespec64 now, system_time;
	struct timekeeper *tk = priv;

	now.tv_sec = tk->xtime_sec;
	now.tv_nsec = (long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
	system_time = timespec64_add(now, tk->wall_to_monotonic);

	/*
	 * We only take the expensive HV call when the clock was set
	 * or when the 11 minutes RTC synchronization time elapsed.
	 */
	if (!was_set && timespec64_compare(&now, &next_sync) < 0)
		return NOTIFY_OK;

	op.cmd = XENPF_settime64;
	op.u.settime64.mbz = 0;
	op.u.settime64.secs = now.tv_sec;
	op.u.settime64.nsecs = now.tv_nsec;
	op.u.settime64.system_time = timespec64_to_ns(&system_time);
	/* Result deliberately ignored: best-effort sync to the hypervisor. */
	(void)HYPERVISOR_platform_op(&op);

	/*
	 * Move the next drift compensation time 11 minutes
	 * ahead. That's emulating the sync_cmos_clock() update for
	 * the hardware RTC.
	 */
	next_sync = now;
	next_sync.tv_sec += 11 * 60;

	return NOTIFY_OK;
}

static struct notifier_block xen_pvclock_gtod_notifier = {
	.notifier_call = xen_pvclock_gtod_notify,
};
|
|
|
|
|
2016-07-13 10:16:52 -07:00
|
|
|
/*
 * CPU hotplug "starting" callback: register this CPU's vcpu_info page with
 * the hypervisor (once per vCPU) and enable the per-CPU event channel IRQ.
 */
static int xen_starting_cpu(unsigned int cpu)
{
	struct vcpu_register_vcpu_info info;
	struct vcpu_info *vcpup;
	int err;

	/*
	 * VCPUOP_register_vcpu_info cannot be called twice for the same
	 * vcpu, so if vcpu_info is already registered, just get out. This
	 * can happen with cpu-hotplug.
	 */
	if (per_cpu(xen_vcpu, cpu) != NULL)
		goto after_register_vcpu_info;

	pr_info("Xen: initializing cpu%d\n", cpu);
	vcpup = per_cpu_ptr(xen_vcpu_info, cpu);

	info.mfn = percpu_to_gfn(vcpup);
	info.offset = xen_offset_in_page(vcpup);

	err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, xen_vcpu_nr(cpu),
				 &info);
	/* Registration failure leaves the CPU unusable for events: fatal. */
	BUG_ON(err);
	per_cpu(xen_vcpu, cpu) = vcpup;

after_register_vcpu_info:
	enable_percpu_irq(xen_events_irq, 0);
	return 0;
}
|
|
|
|
|
|
|
|
/*
 * CPU hotplug teardown counterpart of xen_starting_cpu(): stop event
 * delivery on this CPU. The vcpu_info registration is left in place
 * (it cannot be repeated — see xen_starting_cpu()).
 */
static int xen_dying_cpu(unsigned int cpu)
{
	disable_percpu_irq(xen_events_irq);
	return 0;
}
|
|
|
|
|
2017-04-24 10:58:38 -07:00
|
|
|
/*
 * Ask the hypervisor to shut this domain down with the given
 * SHUTDOWN_* reason. On success the hypercall does not return;
 * any failure is fatal.
 */
void xen_reboot(int reason)
{
	struct sched_shutdown r = { .reason = reason };
	int rc;

	rc = HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
	BUG_ON(rc);
}
|
|
|
|
|
2021-06-04 07:07:33 -07:00
|
|
|
/*
 * Restart-handler notifier: reboot via the hypervisor. xen_reboot() does
 * not return on success, so NOTIFY_DONE is reached only if it failed to
 * take effect.
 */
static int xen_restart(struct notifier_block *nb, unsigned long action,
		       void *data)
{
	xen_reboot(SHUTDOWN_reboot);

	return NOTIFY_DONE;
}

static struct notifier_block xen_restart_nb = {
	.notifier_call = xen_restart,
	.priority = 192,
};
|
2017-04-24 10:58:38 -07:00
|
|
|
|
2013-04-25 03:23:07 -07:00
|
|
|
/* pm_power_off hook: power the domain off via the hypervisor. */
static void xen_power_off(void)
{
	xen_reboot(SHUTDOWN_poweroff);
}
|
|
|
|
|
2014-01-30 05:52:59 -07:00
|
|
|
/*
 * Per-CPU event channel interrupt handler: dispatch all pending Xen
 * event channel upcalls for this CPU.
 */
static irqreturn_t xen_arm_callback(int irq, void *arg)
{
	xen_evtchn_do_upcall();
	return IRQ_HANDLED;
}
|
|
|
|
|
2016-04-07 05:03:28 -07:00
|
|
|
static __initdata struct {
|
|
|
|
const char *compat;
|
|
|
|
const char *prefix;
|
|
|
|
const char *version;
|
|
|
|
bool found;
|
|
|
|
} hyper_node = {"xen,xen", "xen,xen-", NULL, false};
|
|
|
|
|
|
|
|
static int __init fdt_find_hyper_node(unsigned long node, const char *uname,
|
|
|
|
int depth, void *data)
|
|
|
|
{
|
|
|
|
const void *s = NULL;
|
|
|
|
int len;
|
|
|
|
|
|
|
|
if (depth != 1 || strcmp(uname, "hypervisor") != 0)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (of_flat_dt_is_compatible(node, hyper_node.compat))
|
|
|
|
hyper_node.found = true;
|
|
|
|
|
|
|
|
s = of_get_flat_dt_prop(node, "compatible", &len);
|
|
|
|
if (strlen(hyper_node.prefix) + 3 < len &&
|
|
|
|
!strncmp(hyper_node.prefix, s, strlen(hyper_node.prefix)))
|
|
|
|
hyper_node.version = s + strlen(hyper_node.prefix);
|
|
|
|
|
2016-05-12 05:19:54 -07:00
|
|
|
/*
|
|
|
|
* Check if Xen supports EFI by checking whether there is the
|
|
|
|
* "/hypervisor/uefi" node in DT. If so, runtime services are available
|
|
|
|
* through proxy functions (e.g. in case of Xen dom0 EFI implementation
|
|
|
|
* they call special hypercall which executes relevant EFI functions)
|
|
|
|
* and that is why they are always enabled.
|
|
|
|
*/
|
|
|
|
if (IS_ENABLED(CONFIG_XEN_EFI)) {
|
|
|
|
if ((of_get_flat_dt_subnode_by_name(node, "uefi") > 0) &&
|
|
|
|
!efi_runtime_disabled())
|
|
|
|
set_bit(EFI_RUNTIME_SERVICES, &efi.flags);
|
|
|
|
}
|
|
|
|
|
2016-04-07 05:03:28 -07:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2012-09-14 03:47:52 -07:00
|
|
|
/*
 * see Documentation/devicetree/bindings/arm/xen.txt for the
 * documentation of the Xen Device Tree format.
 */
/*
 * Early boot detection of Xen: scan the flat DT for the /hypervisor node,
 * set xen_domain_type/xen_start_flags, and route the console to hvc for
 * non-dom0 guests. Returns silently if no (versioned) Xen node is found.
 */
void __init xen_early_init(void)
{
	of_scan_flat_dt(fdt_find_hyper_node, NULL);
	if (!hyper_node.found) {
		pr_debug("No Xen support\n");
		return;
	}

	if (hyper_node.version == NULL) {
		pr_debug("Xen version not found\n");
		return;
	}

	pr_info("Xen %s support found\n", hyper_node.version);

	xen_domain_type = XEN_HVM_DOMAIN;

	xen_setup_features();

	if (xen_feature(XENFEAT_dom0))
		xen_start_flags |= SIF_INITDOMAIN|SIF_PRIVILEGED;

	if (!console_set_on_cmdline && !xen_initial_domain())
		add_preferred_console("hvc", 0, NULL);
}
|
|
|
|
|
2016-04-07 05:03:27 -07:00
|
|
|
/*
 * ACPI path for discovering the event channel interrupt: query the
 * HVM_PARAM_CALLBACK_IRQ parameter from the hypervisor, decode the PPI
 * number and its trigger/polarity from the packed value, and register it
 * as a GSI. Leaves xen_events_irq at 0 on any failure (caller treats
 * that as "no event channel IRQ"). No-op when CONFIG_ACPI is off.
 */
static void __init xen_acpi_guest_init(void)
{
#ifdef CONFIG_ACPI
	struct xen_hvm_param a;
	int interrupt, trigger, polarity;

	a.domid = DOMID_SELF;
	a.index = HVM_PARAM_CALLBACK_IRQ;

	/* Top byte of the param value encodes the callback delivery type. */
	if (HYPERVISOR_hvm_op(HVMOP_get_param, &a)
		|| (a.value >> 56) != HVM_PARAM_CALLBACK_TYPE_PPI) {
		xen_events_irq = 0;
		return;
	}

	/* Low byte: interrupt number; bits 8-9: trigger mode and polarity. */
	interrupt = a.value & 0xff;
	trigger = ((a.value >> 8) & 0x1) ? ACPI_EDGE_SENSITIVE
					 : ACPI_LEVEL_SENSITIVE;
	polarity = ((a.value >> 8) & 0x2) ? ACPI_ACTIVE_LOW
					  : ACPI_ACTIVE_HIGH;
	xen_events_irq = acpi_register_gsi(NULL, interrupt, trigger, polarity);
#endif
}
|
|
|
|
|
2021-12-09 13:05:35 -07:00
|
|
|
#ifdef CONFIG_XEN_UNPOPULATED_ALLOC
|
|
|
|
/*
|
|
|
|
* A type-less specific Xen resource which contains extended regions
|
|
|
|
* (unused regions of guest physical address space provided by the hypervisor).
|
|
|
|
*/
|
|
|
|
static struct resource xen_resource = {
|
|
|
|
.name = "Xen unused space",
|
|
|
|
};
|
|
|
|
|
|
|
|
int __init arch_xen_unpopulated_init(struct resource **res)
|
|
|
|
{
|
|
|
|
struct device_node *np;
|
|
|
|
struct resource *regs, *tmp_res;
|
|
|
|
uint64_t min_gpaddr = -1, max_gpaddr = 0;
|
|
|
|
unsigned int i, nr_reg = 0;
|
|
|
|
int rc;
|
|
|
|
|
|
|
|
if (!xen_domain())
|
|
|
|
return -ENODEV;
|
|
|
|
|
|
|
|
if (!acpi_disabled)
|
|
|
|
return -ENODEV;
|
|
|
|
|
|
|
|
np = of_find_compatible_node(NULL, NULL, "xen,xen");
|
|
|
|
if (WARN_ON(!np))
|
|
|
|
return -ENODEV;
|
|
|
|
|
|
|
|
/* Skip region 0 which is reserved for grant table space */
|
|
|
|
while (of_get_address(np, nr_reg + EXT_REGION_INDEX, NULL, NULL))
|
|
|
|
nr_reg++;
|
|
|
|
|
|
|
|
if (!nr_reg) {
|
|
|
|
pr_err("No extended regions are found\n");
|
2022-04-19 18:49:13 -07:00
|
|
|
of_node_put(np);
|
2021-12-09 13:05:35 -07:00
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
regs = kcalloc(nr_reg, sizeof(*regs), GFP_KERNEL);
|
2022-04-19 18:49:13 -07:00
|
|
|
if (!regs) {
|
|
|
|
of_node_put(np);
|
2021-12-09 13:05:35 -07:00
|
|
|
return -ENOMEM;
|
2022-04-19 18:49:13 -07:00
|
|
|
}
|
2021-12-09 13:05:35 -07:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Create resource from extended regions provided by the hypervisor to be
|
|
|
|
* used as unused address space for Xen scratch pages.
|
|
|
|
*/
|
|
|
|
for (i = 0; i < nr_reg; i++) {
|
|
|
|
rc = of_address_to_resource(np, i + EXT_REGION_INDEX, ®s[i]);
|
|
|
|
if (rc)
|
|
|
|
goto err;
|
|
|
|
|
|
|
|
if (max_gpaddr < regs[i].end)
|
|
|
|
max_gpaddr = regs[i].end;
|
|
|
|
if (min_gpaddr > regs[i].start)
|
|
|
|
min_gpaddr = regs[i].start;
|
|
|
|
}
|
|
|
|
|
|
|
|
xen_resource.start = min_gpaddr;
|
|
|
|
xen_resource.end = max_gpaddr;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Mark holes between extended regions as unavailable. The rest of that
|
|
|
|
* address space will be available for the allocation.
|
|
|
|
*/
|
|
|
|
for (i = 1; i < nr_reg; i++) {
|
|
|
|
resource_size_t start, end;
|
|
|
|
|
|
|
|
/* There is an overlap between regions */
|
|
|
|
if (regs[i - 1].end + 1 > regs[i].start) {
|
|
|
|
rc = -EINVAL;
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* There is no hole between regions */
|
|
|
|
if (regs[i - 1].end + 1 == regs[i].start)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
start = regs[i - 1].end + 1;
|
|
|
|
end = regs[i].start - 1;
|
|
|
|
|
|
|
|
tmp_res = kzalloc(sizeof(*tmp_res), GFP_KERNEL);
|
|
|
|
if (!tmp_res) {
|
|
|
|
rc = -ENOMEM;
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
|
|
|
|
tmp_res->name = "Unavailable space";
|
|
|
|
tmp_res->start = start;
|
|
|
|
tmp_res->end = end;
|
|
|
|
|
|
|
|
rc = insert_resource(&xen_resource, tmp_res);
|
|
|
|
if (rc) {
|
|
|
|
pr_err("Cannot insert resource %pR (%d)\n", tmp_res, rc);
|
|
|
|
kfree(tmp_res);
|
|
|
|
goto err;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
*res = &xen_resource;
|
|
|
|
|
|
|
|
err:
|
2022-04-19 18:49:13 -07:00
|
|
|
of_node_put(np);
|
2021-12-09 13:05:35 -07:00
|
|
|
kfree(regs);
|
|
|
|
return rc;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2016-04-07 05:03:27 -07:00
|
|
|
/*
 * Device-tree path of guest init: map the event channel interrupt from the
 * "xen,xen" node and record the physical base of the grant table region
 * (reg entry GRANT_TABLE_INDEX) in xen_grant_frames. Errors only log and
 * return; the caller detects failure via xen_events_irq == 0.
 */
static void __init xen_dt_guest_init(void)
{
	struct device_node *xen_node;
	struct resource res;

	xen_node = of_find_compatible_node(NULL, NULL, "xen,xen");
	if (!xen_node) {
		pr_err("Xen support was detected before, but it has disappeared\n");
		return;
	}

	xen_events_irq = irq_of_parse_and_map(xen_node, 0);

	if (of_address_to_resource(xen_node, GRANT_TABLE_INDEX, &res)) {
		pr_err("Xen grant table region is not found\n");
		of_node_put(xen_node);
		return;
	}
	of_node_put(xen_node);
	xen_grant_frames = res.start;
}
|
|
|
|
|
2015-05-06 07:13:31 -07:00
|
|
|
/*
 * Main Xen guest initialization (early_initcall). In order: set up the
 * virtio restricted-memory callback, discover the event channel IRQ
 * (ACPI or DT), enable EFI runtime services if detected, map the shared
 * info page, allocate per-CPU vcpu_info storage, set up grant table
 * frames, disable cpuidle/cpufreq, request the per-CPU event IRQ, and
 * register the CPU hotplug state that brings each CPU online for Xen.
 *
 * NOTE(review): shared_info_page and xen_vcpu_info are not freed on some
 * later failure paths; presumably acceptable since failure here happens
 * once at boot — confirm before reusing this pattern.
 */
static int __init xen_guest_init(void)
{
	struct xen_add_to_physmap xatp;
	struct shared_info *shared_info_page = NULL;
	int rc, cpu;

	if (!xen_domain())
		return 0;

	if (IS_ENABLED(CONFIG_XEN_VIRTIO))
		virtio_set_mem_acc_cb(xen_virtio_restricted_mem_acc);

	if (!acpi_disabled)
		xen_acpi_guest_init();
	else
		xen_dt_guest_init();

	if (!xen_events_irq) {
		pr_err("Xen event channel interrupt not found\n");
		return -ENODEV;
	}

	/*
	 * The fdt parsing codes have set EFI_RUNTIME_SERVICES if Xen EFI
	 * parameters are found. Force enable runtime services.
	 */
	if (efi_enabled(EFI_RUNTIME_SERVICES))
		xen_efi_runtime_setup();

	shared_info_page = (struct shared_info *)get_zeroed_page(GFP_KERNEL);

	if (!shared_info_page) {
		pr_err("not enough memory\n");
		return -ENOMEM;
	}
	/* Ask the hypervisor to place the shared info page at this GFN. */
	xatp.domid = DOMID_SELF;
	xatp.idx = 0;
	xatp.space = XENMAPSPACE_shared_info;
	xatp.gpfn = virt_to_gfn(shared_info_page);
	if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
		BUG();

	HYPERVISOR_shared_info = (struct shared_info *)shared_info_page;

	/* xen_vcpu is a pointer to the vcpu_info struct in the shared_info
	 * page, we use it in the event channel upcall and in some pvclock
	 * related functions.
	 * The shared info contains exactly 1 CPU (the boot CPU). The guest
	 * is required to use VCPUOP_register_vcpu_info to place vcpu info
	 * for secondary CPUs as they are brought up.
	 * For uniformity we use VCPUOP_register_vcpu_info even on cpu0.
	 */
	xen_vcpu_info = __alloc_percpu(sizeof(struct vcpu_info),
				       1 << fls(sizeof(struct vcpu_info) - 1));
	if (xen_vcpu_info == NULL)
		return -ENOMEM;

	/* Direct vCPU id mapping for ARM guests. */
	for_each_possible_cpu(cpu)
		per_cpu(xen_vcpu_id, cpu) = cpu;

	if (!xen_grant_frames) {
		xen_auto_xlat_grant_frames.count = gnttab_max_grant_frames();
		rc = xen_xlate_map_ballooned_pages(&xen_auto_xlat_grant_frames.pfn,
						   &xen_auto_xlat_grant_frames.vaddr,
						   xen_auto_xlat_grant_frames.count);
	} else
		rc = gnttab_setup_auto_xlat_frames(xen_grant_frames);
	if (rc) {
		free_percpu(xen_vcpu_info);
		return rc;
	}
	gnttab_init();

	/*
	 * Making sure board specific code will not set up ops for
	 * cpu idle and cpu freq.
	 */
	disable_cpuidle();
	disable_cpufreq();

	xen_init_IRQ();

	if (request_percpu_irq(xen_events_irq, xen_arm_callback,
			       "events", &xen_vcpu)) {
		pr_err("Error request IRQ %d\n", xen_events_irq);
		return -EINVAL;
	}

	if (xen_initial_domain())
		pvclock_gtod_register_notifier(&xen_pvclock_gtod_notifier);

	return cpuhp_setup_state(CPUHP_AP_ARM_XEN_STARTING,
				 "arm/xen:starting", xen_starting_cpu,
				 xen_dying_cpu);
}
early_initcall(xen_guest_init);
|
2013-05-08 04:59:01 -07:00
|
|
|
|
arm64/arm: xen: enlighten: Fix KPTI checks
When KPTI is in use, we cannot register a runstate region as XEN
requires that this is always a valid VA, which we cannot guarantee. Due
to this, xen_starting_cpu() must avoid registering each CPU's runstate
region, and xen_guest_init() must avoid setting up features that depend
upon it.
We tried to ensure that in commit:
f88af7229f6f22ce (" xen/arm: do not setup the runstate info page if kpti is enabled")
... where we added checks for xen_kernel_unmapped_at_usr(), which wraps
arm64_kernel_unmapped_at_el0() on arm64 and is always false on 32-bit
arm.
Unfortunately, as xen_guest_init() is an early_initcall, this happens
before secondary CPUs are booted and arm64 has finalized the
ARM64_UNMAP_KERNEL_AT_EL0 cpucap which backs
arm64_kernel_unmapped_at_el0(), and so this can subsequently be set as
secondary CPUs are onlined. On a big.LITTLE system where the boot CPU
does not require KPTI but some secondary CPUs do, this will result in
xen_guest_init() intializing features that depend on the runstate
region, and xen_starting_cpu() registering the runstate region on some
CPUs before KPTI is subsequent enabled, resulting the the problems the
aforementioned commit tried to avoid.
Handle this more robsutly by deferring the initialization of the
runstate region until secondary CPUs have been initialized and the
ARM64_UNMAP_KERNEL_AT_EL0 cpucap has been finalized. The per-cpu work is
moved into a new hotplug starting function which is registered later
when we're certain that KPTI will not be used.
Fixes: f88af7229f6f ("xen/arm: do not setup the runstate info page if kpti is enabled")
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: Bertrand Marquis <bertrand.marquis@arm.com>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Stefano Stabellini <sstabellini@kernel.org>
Cc: Suzuki K Poulose <suzuki.poulose@arm.com>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
2023-10-16 03:24:25 -07:00
|
|
|
/*
 * CPU hotplug callback registered late (see xen_late_init()) so that the
 * runstate region is only set up once it is known KPTI is not in use.
 */
static int xen_starting_runstate_cpu(unsigned int cpu)
{
	xen_setup_runstate_info(cpu);
	return 0;
}
|
|
|
|
|
|
|
|
/*
 * Late Xen setup (late_initcall): install power-off/restart handlers, set
 * the system time from the hypervisor wall clock for non-dom0 guests, and
 * — only when KPTI is not in use — enable guest time setup and register
 * the per-CPU runstate hotplug state. Runstate registration requires the
 * region to stay at a valid VA, which KPTI cannot guarantee, and the
 * KPTI decision is only final after secondary CPUs have come up, hence
 * this runs at late_initcall rather than in xen_guest_init().
 */
static int __init xen_late_init(void)
{
	if (!xen_domain())
		return -ENODEV;

	pm_power_off = xen_power_off;
	register_restart_handler(&xen_restart_nb);
	if (!xen_initial_domain()) {
		struct timespec64 ts;

		xen_read_wallclock(&ts);
		do_settimeofday64(&ts);
	}

	/* KPTI in use: skip everything that depends on the runstate region. */
	if (xen_kernel_unmapped_at_usr())
		return 0;

	xen_time_setup_guest();

	return cpuhp_setup_state(CPUHP_AP_ARM_XEN_RUNSTATE_STARTING,
				 "arm/xen_runstate:starting",
				 xen_starting_runstate_cpu, NULL);
}
late_initcall(xen_late_init);
|
2012-09-14 06:37:32 -07:00
|
|
|
|
2014-05-08 08:54:02 -07:00
|
|
|
|
|
|
|
/* empty stubs */
/* Suspend/resume hooks: nothing to do on this architecture. */
void xen_arch_pre_suspend(void) { }
void xen_arch_post_suspend(int suspend_cancelled) { }
void xen_timer_resume(void) { }
void xen_arch_resume(void) { }
void xen_arch_suspend(void) { }
|
2014-05-08 08:54:02 -07:00
|
|
|
|
|
|
|
|
2015-09-14 06:20:52 -07:00
|
|
|
/* In the hypercall.S file. */
|
2012-11-06 15:06:52 -07:00
|
|
|
EXPORT_SYMBOL_GPL(HYPERVISOR_event_channel_op);
|
|
|
|
EXPORT_SYMBOL_GPL(HYPERVISOR_grant_table_op);
|
2012-11-08 08:58:55 -07:00
|
|
|
EXPORT_SYMBOL_GPL(HYPERVISOR_xen_version);
|
|
|
|
EXPORT_SYMBOL_GPL(HYPERVISOR_console_io);
|
|
|
|
EXPORT_SYMBOL_GPL(HYPERVISOR_sched_op);
|
|
|
|
EXPORT_SYMBOL_GPL(HYPERVISOR_hvm_op);
|
|
|
|
EXPORT_SYMBOL_GPL(HYPERVISOR_memory_op);
|
|
|
|
EXPORT_SYMBOL_GPL(HYPERVISOR_physdev_op);
|
2013-04-25 06:53:05 -07:00
|
|
|
EXPORT_SYMBOL_GPL(HYPERVISOR_vcpu_op);
|
2019-10-01 10:38:03 -07:00
|
|
|
EXPORT_SYMBOL_GPL(HYPERVISOR_platform_op_raw);
|
2014-05-09 09:10:49 -07:00
|
|
|
EXPORT_SYMBOL_GPL(HYPERVISOR_multicall);
|
2016-07-05 22:00:29 -07:00
|
|
|
EXPORT_SYMBOL_GPL(HYPERVISOR_vm_assist);
|
2017-02-13 10:03:23 -07:00
|
|
|
EXPORT_SYMBOL_GPL(HYPERVISOR_dm_op);
|
2012-11-06 15:06:52 -07:00
|
|
|
EXPORT_SYMBOL_GPL(privcmd_call);
|