2019-05-23 02:14:39 -07:00
|
|
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*  Paravirtualization interfaces
    Copyright (C) 2006 Rusty Russell IBM Corporation

    2007 - x86_64 support added by Glauber de Oliveira Costa, Red Hat Inc
*/
|
2008-01-30 05:32:04 -07:00
|
|
|
|
2006-12-06 18:14:07 -07:00
|
|
|
#include <linux/errno.h>
|
2016-07-13 17:18:56 -07:00
|
|
|
#include <linux/init.h>
|
|
|
|
#include <linux/export.h>
|
2006-12-06 18:14:07 -07:00
|
|
|
#include <linux/efi.h>
|
|
|
|
#include <linux/bcd.h>
|
2007-05-02 10:27:15 -07:00
|
|
|
#include <linux/highmem.h>
|
2014-04-17 01:17:05 -07:00
|
|
|
#include <linux/kprobes.h>
|
2020-06-08 21:32:42 -07:00
|
|
|
#include <linux/pgtable.h>
|
2021-03-11 07:23:09 -07:00
|
|
|
#include <linux/static_call.h>
|
2006-12-06 18:14:07 -07:00
|
|
|
|
|
|
|
#include <asm/bug.h>
|
|
|
|
#include <asm/paravirt.h>
|
2012-01-20 16:35:53 -07:00
|
|
|
#include <asm/debugreg.h>
|
2006-12-06 18:14:07 -07:00
|
|
|
#include <asm/desc.h>
|
|
|
|
#include <asm/setup.h>
|
|
|
|
#include <asm/time.h>
|
2008-06-24 21:19:12 -07:00
|
|
|
#include <asm/pgalloc.h>
|
2006-12-06 18:14:07 -07:00
|
|
|
#include <asm/irq.h>
|
|
|
|
#include <asm/delay.h>
|
2006-12-06 18:14:08 -07:00
|
|
|
#include <asm/fixmap.h>
|
|
|
|
#include <asm/apic.h>
|
2006-12-06 18:14:08 -07:00
|
|
|
#include <asm/tlbflush.h>
|
2007-03-05 01:30:35 -07:00
|
|
|
#include <asm/timer.h>
|
2012-03-28 10:11:12 -07:00
|
|
|
#include <asm/special_insns.h>
|
2018-08-22 08:30:16 -07:00
|
|
|
#include <asm/tlb.h>
|
2020-02-18 08:47:12 -07:00
|
|
|
#include <asm/io_bitmap.h>
|
2023-01-12 00:20:31 -07:00
|
|
|
#include <asm/gsseg.h>
|
2006-12-06 18:14:07 -07:00
|
|
|
|
2021-10-28 00:27:48 -07:00
|
|
|
/* Patchable asm stub that always returns 0; lives in .entry.text. */
DEFINE_ASM_FUNC(paravirt_ret0, "xor %eax,%eax", .entry.text);
|
2021-10-28 00:27:48 -07:00
|
|
|
|
2009-08-20 04:19:57 -07:00
|
|
|
/*
 * Default boot banner: report which paravirt platform the kernel is
 * running on (pv_info.name; "bare hardware" unless a hypervisor
 * backend overrides it).
 */
void __init default_banner(void)
{
	printk(KERN_INFO "Booting paravirtualized kernel on %s\n",
	       pv_info.name);
}
|
|
|
|
|
2018-08-28 00:40:23 -07:00
|
|
|
#ifdef CONFIG_PARAVIRT_XXL
|
2023-11-29 06:33:29 -07:00
|
|
|
DEFINE_ASM_FUNC(_paravirt_ident_64, "mov %rdi, %rax", .text);
|
|
|
|
DEFINE_ASM_FUNC(pv_native_save_fl, "pushf; pop %rax", .noinstr.text);
|
|
|
|
DEFINE_ASM_FUNC(pv_native_irq_disable, "cli", .noinstr.text);
|
|
|
|
DEFINE_ASM_FUNC(pv_native_irq_enable, "sti", .noinstr.text);
|
|
|
|
DEFINE_ASM_FUNC(pv_native_read_cr2, "mov %cr2, %rax", .noinstr.text);
|
2018-08-28 00:40:23 -07:00
|
|
|
#endif
|
2007-05-02 10:27:14 -07:00
|
|
|
|
2024-08-06 04:22:07 -07:00
|
|
|
/* Off by default; enabled below when running under a hypervisor. */
DEFINE_STATIC_KEY_FALSE(virt_spin_lock_key);

/*
 * Enable the virt_spin_lock_key static branch when the CPU reports the
 * hypervisor feature bit, i.e. when we are a guest.
 */
void __init native_pv_lock_init(void)
{
	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		static_branch_enable(&virt_spin_lock_key);
}
|
|
|
|
|
2023-07-26 16:11:43 -07:00
|
|
|
/*
 * Native backend for freeing a page-table page: just hand the page to
 * the mmu_gather machinery for batched freeing.
 */
static void native_tlb_remove_table(struct mmu_gather *tlb, void *table)
{
	tlb_remove_page(tlb, table);
}
|
|
|
|
|
2012-02-24 00:31:31 -07:00
|
|
|
/* Steal-time accounting keys; both stay disabled on bare metal. */
struct static_key paravirt_steal_enabled;
struct static_key paravirt_steal_rq_enabled;
|
2011-07-11 12:28:15 -07:00
|
|
|
|
|
|
|
/* Bare metal has no hypervisor stealing CPU time: always 0 ns. */
static u64 native_steal_clock(int cpu)
{
	return 0;
}
|
|
|
|
|
2021-03-11 07:23:09 -07:00
|
|
|
/* Static calls with native defaults; hypervisors retarget them. */
DEFINE_STATIC_CALL(pv_steal_clock, native_steal_clock);
DEFINE_STATIC_CALL(pv_sched_clock, native_sched_clock);
|
|
|
|
|
|
|
|
/*
 * Install @func as the sched_clock implementation by retargeting the
 * pv_sched_clock static call.
 */
void paravirt_set_sched_clock(u64 (*func)(void))
{
	static_call_update(pv_sched_clock, func);
}
|
|
|
|
|
2006-12-06 18:14:07 -07:00
|
|
|
/*
 * Resource spanning the entire legacy I/O port range; claimed by
 * paravirt_disable_iospace() below to lock out legacy port probing.
 * (Replaces a stale "These are in entry.S" comment — nothing here
 * lives in entry.S anymore.)
 */
static struct resource reserve_ioports = {
	.start = 0,
	.end = IO_SPACE_LIMIT,
	.name = "paravirt-ioport",
	.flags = IORESOURCE_IO | IORESOURCE_BUSY,
};
|
|
|
|
|
|
|
|
/*
 * Reserve the whole legacy IO space to prevent any legacy drivers
 * from wasting time probing for their hardware. This is a fairly
 * brute-force approach to disabling all non-virtual drivers.
 *
 * Note that this must be called very early to have any effect.
 *
 * Returns 0 on success, or a negative errno from request_resource()
 * if (part of) the range was already claimed.
 */
int paravirt_disable_iospace(void)
{
	return request_resource(&ioport_resource, &reserve_ioports);
}
|
|
|
|
|
2018-08-28 00:40:23 -07:00
|
|
|
#ifdef CONFIG_PARAVIRT_XXL
/* noinstr wrapper: write CR2 using the native instruction. */
static noinstr void pv_native_write_cr2(unsigned long val)
{
	native_write_cr2(val);
}
|
2021-06-24 02:41:15 -07:00
|
|
|
|
|
|
|
/* noinstr wrapper: read debug register @regno via the native access. */
static noinstr unsigned long pv_native_get_debugreg(int regno)
{
	return native_get_debugreg(regno);
}
|
2021-06-24 02:41:16 -07:00
|
|
|
|
|
|
|
/* noinstr wrapper: write @val to debug register @regno natively. */
static noinstr void pv_native_set_debugreg(int regno, unsigned long val)
{
	native_set_debugreg(regno, val);
}
|
2021-06-24 02:41:19 -07:00
|
|
|
|
2023-01-12 12:43:44 -07:00
|
|
|
/*
 * noinstr wrapper around the native WBINVD cache flush. Non-static:
 * it is referenced from pv_ops below (and presumably declared in a
 * header — not visible in this file).
 */
noinstr void pv_native_wbinvd(void)
{
	native_wbinvd();
}
|
|
|
|
|
|
|
|
/* noinstr wrapper: enable interrupts and halt (native STI;HLT). */
static noinstr void pv_native_safe_halt(void)
{
	native_safe_halt();
}
#endif
|
paravirt: clean up lazy mode handling
Currently, the set_lazy_mode pv_op is overloaded with 5 functions:
1. enter lazy cpu mode
2. leave lazy cpu mode
3. enter lazy mmu mode
4. leave lazy mmu mode
5. flush pending batched operations
This complicates each paravirt backend, since it needs to deal with
all the possible state transitions, handling flushing, etc. In
particular, flushing is quite distinct from the other 4 functions, and
seems to just cause complication.
This patch removes the set_lazy_mode operation, and adds "enter" and
"leave" lazy mode operations on mmu_ops and cpu_ops. All the logic
associated with enter and leaving lazy states is now in common code
(basically BUG_ONs to make sure that no mode is current when entering
a lazy mode, and make sure that the mode is current when leaving).
Also, flush is handled in a common way, by simply leaving and
re-entering the lazy mode.
The result is that the Xen, lguest and VMI lazy mode implementations
are much simpler.
Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Cc: Andi Kleen <ak@suse.de>
Cc: Zach Amsden <zach@vmware.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Avi Kivity <avi@qumranet.com>
Cc: Anthony Liguory <aliguori@us.ibm.com>
Cc: "Glauber de Oliveira Costa" <glommer@gmail.com>
Cc: Jun Nakajima <jun.nakajima@intel.com>
2007-10-16 11:51:29 -07:00
|
|
|
|
2007-10-16 11:51:29 -07:00
|
|
|
/* Platform identification; hypervisor backends overwrite .name. */
struct pv_info pv_info = {
	.name = "bare hardware",
#ifdef CONFIG_PARAVIRT_XXL
	.extra_user_64bit_cs = __USER_CS,
#endif
};
|
2006-12-06 18:14:07 -07:00
|
|
|
|
2009-01-28 15:35:02 -07:00
|
|
|
/* 64-bit pagetable entries: identity val<->native conversion. */
#define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
|
2009-01-28 15:35:02 -07:00
|
|
|
|
2018-08-28 00:40:19 -07:00
|
|
|
/*
 * The master paravirt operations table, initialized with the native
 * (bare-hardware) implementations. Hypervisor backends patch
 * individual entries at boot.
 */
struct paravirt_patch_template pv_ops = {
	/* Cpu ops. */
	.cpu.io_delay = native_io_delay,

#ifdef CONFIG_PARAVIRT_XXL
	.cpu.cpuid = native_cpuid,
	.cpu.get_debugreg = pv_native_get_debugreg,
	.cpu.set_debugreg = pv_native_set_debugreg,
	.cpu.read_cr0 = native_read_cr0,
	.cpu.write_cr0 = native_write_cr0,
	.cpu.write_cr4 = native_write_cr4,
	.cpu.wbinvd = pv_native_wbinvd,
	.cpu.read_msr = native_read_msr,
	.cpu.write_msr = native_write_msr,
	.cpu.read_msr_safe = native_read_msr_safe,
	.cpu.write_msr_safe = native_write_msr_safe,
	.cpu.read_pmc = native_read_pmc,
	.cpu.load_tr_desc = native_load_tr_desc,
	.cpu.set_ldt = native_set_ldt,
	.cpu.load_gdt = native_load_gdt,
	.cpu.load_idt = native_load_idt,
	.cpu.store_tr = native_store_tr,
	.cpu.load_tls = native_load_tls,
	.cpu.load_gs_index = native_load_gs_index,
	.cpu.write_ldt_entry = native_write_ldt_entry,
	.cpu.write_gdt_entry = native_write_gdt_entry,
	.cpu.write_idt_entry = native_write_idt_entry,

	/* LDT page accounting hooks: nothing to do natively. */
	.cpu.alloc_ldt = paravirt_nop,
	.cpu.free_ldt = paravirt_nop,

	.cpu.load_sp0 = native_load_sp0,

#ifdef CONFIG_X86_IOPL_IOPERM
	.cpu.invalidate_io_bitmap = native_tss_invalidate_io_bitmap,
	.cpu.update_io_bitmap = native_tss_update_io_bitmap,
#endif

	/* Context-switch bracketing: no-ops on bare metal. */
	.cpu.start_context_switch = paravirt_nop,
	.cpu.end_context_switch = paravirt_nop,

	/* Irq ops. */
	.irq.save_fl = __PV_IS_CALLEE_SAVE(pv_native_save_fl),
	.irq.irq_disable = __PV_IS_CALLEE_SAVE(pv_native_irq_disable),
	.irq.irq_enable = __PV_IS_CALLEE_SAVE(pv_native_irq_enable),
	.irq.safe_halt = pv_native_safe_halt,
	.irq.halt = native_halt,
#endif /* CONFIG_PARAVIRT_XXL */

	/* Mmu ops. */
	.mmu.flush_tlb_user = native_flush_tlb_local,
	.mmu.flush_tlb_kernel = native_flush_tlb_global,
	.mmu.flush_tlb_one_user = native_flush_tlb_one_user,
	.mmu.flush_tlb_multi = native_flush_tlb_multi,
	.mmu.tlb_remove_table = native_tlb_remove_table,

	.mmu.exit_mmap = paravirt_nop,
	.mmu.notify_page_enc_status_changed = paravirt_nop,

#ifdef CONFIG_PARAVIRT_XXL
	.mmu.read_cr2 = __PV_IS_CALLEE_SAVE(pv_native_read_cr2),
	.mmu.write_cr2 = pv_native_write_cr2,
	.mmu.read_cr3 = __native_read_cr3,
	.mmu.write_cr3 = native_write_cr3,

	.mmu.pgd_alloc = __paravirt_pgd_alloc,
	.mmu.pgd_free = paravirt_nop,

	/* Page-table page alloc/release notifications: native no-ops. */
	.mmu.alloc_pte = paravirt_nop,
	.mmu.alloc_pmd = paravirt_nop,
	.mmu.alloc_pud = paravirt_nop,
	.mmu.alloc_p4d = paravirt_nop,
	.mmu.release_pte = paravirt_nop,
	.mmu.release_pmd = paravirt_nop,
	.mmu.release_pud = paravirt_nop,
	.mmu.release_p4d = paravirt_nop,

	.mmu.set_pte = native_set_pte,
	.mmu.set_pmd = native_set_pmd,

	.mmu.ptep_modify_prot_start = __ptep_modify_prot_start,
	.mmu.ptep_modify_prot_commit = __ptep_modify_prot_commit,

	.mmu.set_pud = native_set_pud,

	/* Native pte/pmd/pud values need no translation (PTE_IDENT). */
	.mmu.pmd_val = PTE_IDENT,
	.mmu.make_pmd = PTE_IDENT,

	.mmu.pud_val = PTE_IDENT,
	.mmu.make_pud = PTE_IDENT,

	.mmu.set_p4d = native_set_p4d,

#if CONFIG_PGTABLE_LEVELS >= 5
	.mmu.p4d_val = PTE_IDENT,
	.mmu.make_p4d = PTE_IDENT,

	.mmu.set_pgd = native_set_pgd,
#endif /* CONFIG_PGTABLE_LEVELS >= 5 */

	.mmu.pte_val = PTE_IDENT,
	.mmu.pgd_val = PTE_IDENT,

	.mmu.make_pte = PTE_IDENT,
	.mmu.make_pgd = PTE_IDENT,

	.mmu.enter_mmap = paravirt_nop,

	/* Lazy MMU batching: nothing to batch natively. */
	.mmu.lazy_mode = {
		.enter = paravirt_nop,
		.leave = paravirt_nop,
		.flush = paravirt_nop,
	},

	.mmu.set_fixmap = native_set_fixmap,
#endif /* CONFIG_PARAVIRT_XXL */

#if defined(CONFIG_PARAVIRT_SPINLOCKS)
	/* Lock ops. */
#ifdef CONFIG_SMP
	.lock.queued_spin_lock_slowpath = native_queued_spin_lock_slowpath,
	.lock.queued_spin_unlock =
			PV_CALLEE_SAVE(__native_queued_spin_unlock),
	.lock.wait = paravirt_nop,
	.lock.kick = paravirt_nop,
	.lock.vcpu_is_preempted =
			PV_CALLEE_SAVE(__native_vcpu_is_preempted),
#endif /* SMP */
#endif
};
|
2007-01-22 21:40:36 -07:00
|
|
|
|
2018-08-28 00:40:23 -07:00
|
|
|
#ifdef CONFIG_PARAVIRT_XXL
|
2018-08-28 00:40:19 -07:00
|
|
|
NOKPROBE_SYMBOL(native_load_idt);
|
2018-08-28 00:40:23 -07:00
|
|
|
#endif
|
2018-08-28 00:40:19 -07:00
|
|
|
|
2018-10-29 08:01:16 -07:00
|
|
|
/* Exported for modules that consult the paravirt ops/platform info. */
EXPORT_SYMBOL(pv_ops);
EXPORT_SYMBOL_GPL(pv_info);
|