diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index cd75e78a06c1..5b773b34768d 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -142,9 +142,10 @@ ifeq ($(CONFIG_X86_32),y)
 
 	ifeq ($(CONFIG_STACKPROTECTOR),y)
 		ifeq ($(CONFIG_SMP),y)
-			KBUILD_CFLAGS += -mstack-protector-guard-reg=fs -mstack-protector-guard-symbol=__stack_chk_guard
+			KBUILD_CFLAGS += -mstack-protector-guard-reg=fs \
+					 -mstack-protector-guard-symbol=__ref_stack_chk_guard
 		else
-			KBUILD_CFLAGS += -mstack-protector-guard=global
+			KBUILD_CFLAGS += -mstack-protector-guard=global
 		endif
 	endif
 else
diff --git a/arch/x86/entry/entry.S b/arch/x86/entry/entry.S
index 324686bca368..b7ea3e8e9ecc 100644
--- a/arch/x86/entry/entry.S
+++ b/arch/x86/entry/entry.S
@@ -51,3 +51,19 @@ EXPORT_SYMBOL_GPL(mds_verw_sel);
 .popsection
 
 THUNK warn_thunk_thunk, __warn_thunk
+
+#ifndef CONFIG_X86_64
+/*
+ * Clang's implementation of TLS stack cookies requires the variable in
+ * question to be a TLS variable. If the variable happens to be defined as an
+ * ordinary variable with external linkage in the same compilation unit (which
+ * amounts to the whole of vmlinux with LTO enabled), Clang will drop the
+ * segment register prefix from the references, resulting in broken code. Work
+ * around this by avoiding the symbol used in -mstack-protector-guard-symbol=
+ * entirely in the C code, and use an alias emitted by the linker script
+ * instead.
+ */
+#ifdef CONFIG_STACKPROTECTOR
+EXPORT_SYMBOL(__ref_stack_chk_guard);
+#endif
+#endif
diff --git a/arch/x86/include/asm/asm-prototypes.h b/arch/x86/include/asm/asm-prototypes.h
index 25466c4d2134..3674006e3974 100644
--- a/arch/x86/include/asm/asm-prototypes.h
+++ b/arch/x86/include/asm/asm-prototypes.h
@@ -20,3 +20,6 @@
 extern void cmpxchg8b_emu(void);
 #endif
 
+#if defined(__GENKSYMS__) && defined(CONFIG_STACKPROTECTOR)
+extern unsigned long __ref_stack_chk_guard;
+#endif
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index fab5caec0b72..823f44f7bc94 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -924,6 +924,17 @@ static void init_amd_zen4(struct cpuinfo_x86 *c)
 {
 	if (!cpu_has(c, X86_FEATURE_HYPERVISOR))
 		msr_set_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_SHARED_BTB_FIX_BIT);
+
+	/*
+	 * These Zen4 SoCs advertise support for virtualized VMLOAD/VMSAVE
+	 * in some BIOS versions but they can lead to random host reboots.
+	 */
+	switch (c->x86_model) {
+	case 0x18 ... 0x1f:
+	case 0x60 ... 0x7f:
+		clear_cpu_cap(c, X86_FEATURE_V_VMSAVE_VMLOAD);
+		break;
+	}
 }
 
 static void init_amd_zen5(struct cpuinfo_x86 *c)
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index a5f221ea5688..f43bb974fc66 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -2089,8 +2089,10 @@ void syscall_init(void)
 
 #ifdef CONFIG_STACKPROTECTOR
 DEFINE_PER_CPU(unsigned long, __stack_chk_guard);
+#ifndef CONFIG_SMP
 EXPORT_PER_CPU_SYMBOL(__stack_chk_guard);
 #endif
+#endif
 
 #endif	/* CONFIG_X86_64 */
 
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index b8c5741d2fb4..feb8102a9ca7 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -491,6 +491,9 @@ SECTIONS
 . = ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE),
 	   "kernel image bigger than KERNEL_IMAGE_SIZE");
 
+/* needed for Clang - see arch/x86/entry/entry.S */
+PROVIDE(__ref_stack_chk_guard = __stack_chk_guard);
+
 #ifdef CONFIG_X86_64
 /*
  * Per-cpu symbols which need to be offset from __per_cpu_load
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 70b02fc61d93..8d29163568a7 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -656,7 +656,8 @@ static bool memremap_is_setup_data(resource_size_t phys_addr,
 		paddr_next = data->next;
 		len = data->len;
 
-		if ((phys_addr > paddr) && (phys_addr < (paddr + len))) {
+		if ((phys_addr > paddr) &&
+		    (phys_addr < (paddr + sizeof(struct setup_data) + len))) {
 			memunmap(data);
 			return true;
 		}
@@ -718,7 +719,8 @@ static bool __init early_memremap_is_setup_data(resource_size_t phys_addr,
 		paddr_next = data->next;
 		len = data->len;
 
-		if ((phys_addr > paddr) && (phys_addr < (paddr + len))) {
+		if ((phys_addr > paddr) &&
+		    (phys_addr < (paddr + sizeof(struct setup_data) + len))) {
 			early_memunmap(data, sizeof(*data));
 			return true;
 		}