2019-05-19 05:08:55 -07:00
|
|
|
// SPDX-License-Identifier: GPL-2.0-only
|
2018-06-22 15:08:41 -07:00
|
|
|
/* cpu_feature_enabled() cannot be used this early */
|
|
|
|
#define USE_EARLY_PGTABLE_L5
|
|
|
|
|
2018-10-30 15:09:49 -07:00
|
|
|
#include <linux/memblock.h>
|
2009-03-13 22:49:49 -07:00
|
|
|
#include <linux/linkage.h>
|
2008-09-04 20:09:00 -07:00
|
|
|
#include <linux/bitops.h>
|
2009-03-13 22:49:49 -07:00
|
|
|
#include <linux/kernel.h>
|
2016-07-13 17:18:56 -07:00
|
|
|
#include <linux/export.h>
|
2009-03-13 22:49:49 -07:00
|
|
|
#include <linux/percpu.h>
|
|
|
|
#include <linux/string.h>
|
x86/cpu: Trim model ID whitespace
We did try trimming whitespace surrounding the 'model name'
field in /proc/cpuinfo since reportedly some userspace uses it
in string comparisons and there were discrepancies:
[thetango@prarit ~]# grep "^model name" /proc/cpuinfo | uniq -c | sed 's/\ /_/g'
______1_model_name :_AMD_Opteron(TM)_Processor_6272
_____63_model_name :_AMD_Opteron(TM)_Processor_6272_________________
However, there were issues with overlapping buffers, string
sizes and non-byte-sized copies in the previous proposed
solutions; see Link tags below for the whole farce.
So, instead of diddling with this more, let's simply extend what
was there originally with trimming any present trailing
whitespace. Final result is really simple and obvious.
Testing with the most insane model IDs qemu can generate, looks
good:
.model_id = " My funny model ID CPU ",
______4_model_name :_My_funny_model_ID_CPU
.model_id = "My funny model ID CPU ",
______4_model_name :_My_funny_model_ID_CPU
.model_id = " My funny model ID CPU",
______4_model_name :_My_funny_model_ID_CPU
.model_id = " ",
______4_model_name :__
.model_id = "",
______4_model_name :_15/02
Signed-off-by: Borislav Petkov <bp@suse.de>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Igor Mammedov <imammedo@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1432050210-32036-1-git-send-email-prarit@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
2015-06-01 03:06:57 -07:00
|
|
|
#include <linux/ctype.h>
|
2005-04-16 15:20:36 -07:00
|
|
|
#include <linux/delay.h>
|
2017-02-01 11:08:20 -07:00
|
|
|
#include <linux/sched/mm.h>
|
2017-02-01 08:36:40 -07:00
|
|
|
#include <linux/sched/clock.h>
|
2017-02-03 17:20:53 -07:00
|
|
|
#include <linux/sched/task.h>
|
2020-01-09 05:17:23 -07:00
|
|
|
#include <linux/sched/smt.h>
|
2009-03-13 22:49:49 -07:00
|
|
|
#include <linux/init.h>
|
2014-04-17 01:17:12 -07:00
|
|
|
#include <linux/kprobes.h>
|
2009-03-13 22:49:49 -07:00
|
|
|
#include <linux/kgdb.h>
|
2023-06-13 16:39:41 -07:00
|
|
|
#include <linux/mem_encrypt.h>
|
2005-04-16 15:20:36 -07:00
|
|
|
#include <linux/smp.h>
|
2023-06-13 16:39:24 -07:00
|
|
|
#include <linux/cpu.h>
|
2009-03-13 22:49:49 -07:00
|
|
|
#include <linux/io.h>
|
2015-07-20 14:47:58 -07:00
|
|
|
#include <linux/syscore_ops.h>
|
2020-06-08 21:32:42 -07:00
|
|
|
#include <linux/pgtable.h>
|
2022-10-23 13:06:00 -07:00
|
|
|
#include <linux/stackprotector.h>
|
2023-06-13 16:39:24 -07:00
|
|
|
#include <linux/utsname.h>
|
2009-03-13 22:49:49 -07:00
|
|
|
|
2023-06-13 16:39:24 -07:00
|
|
|
#include <asm/alternative.h>
|
2020-09-21 14:56:38 -07:00
|
|
|
#include <asm/cmdline.h>
|
perf: Do the big rename: Performance Counters -> Performance Events
Bye-bye Performance Counters, welcome Performance Events!
In the past few months the perfcounters subsystem has grown out its
initial role of counting hardware events, and has become (and is
becoming) a much broader generic event enumeration, reporting, logging,
monitoring, analysis facility.
Naming its core object 'perf_counter' and naming the subsystem
'perfcounters' has become more and more of a misnomer. With pending
code like hw-breakpoints support the 'counter' name is less and
less appropriate.
All in one, we've decided to rename the subsystem to 'performance
events' and to propagate this rename through all fields, variables
and API names. (in an ABI compatible fashion)
The word 'event' is also a bit shorter than 'counter' - which makes
it slightly more convenient to write/handle as well.
Thanks goes to Stephane Eranian who first observed this misnomer and
suggested a rename.
User-space tooling and ABI compatibility is not affected - this patch
should be function-invariant. (Also, defconfigs were not touched to
keep the size down.)
This patch has been generated via the following script:
FILES=$(find * -type f | grep -vE 'oprofile|[^K]config')
sed -i \
-e 's/PERF_EVENT_/PERF_RECORD_/g' \
-e 's/PERF_COUNTER/PERF_EVENT/g' \
-e 's/perf_counter/perf_event/g' \
-e 's/nb_counters/nb_events/g' \
-e 's/swcounter/swevent/g' \
-e 's/tpcounter_event/tp_event/g' \
$FILES
for N in $(find . -name perf_counter.[ch]); do
M=$(echo $N | sed 's/perf_counter/perf_event/g')
mv $N $M
done
FILES=$(find . -name perf_event.*)
sed -i \
-e 's/COUNTER_MASK/REG_MASK/g' \
-e 's/COUNTER/EVENT/g' \
-e 's/\<event\>/event_id/g' \
-e 's/counter/event/g' \
-e 's/Counter/Event/g' \
$FILES
... to keep it as correct as possible. This script can also be
used by anyone who has pending perfcounters patches - it converts
a Linux kernel tree over to the new naming. We tried to time this
change to the point in time where the amount of pending patches
is the smallest: the end of the merge window.
Namespace clashes were fixed up in a preparatory patch - and some
stylistic fallout will be fixed up in a subsequent patch.
( NOTE: 'counters' are still the proper terminology when we deal
with hardware registers - and these sed scripts are a bit
over-eager in renaming them. I've undone some of that, but
in case there's something left where 'counter' would be
better than 'event' we can undo that on an individual basis
instead of touching an otherwise nicely automated patch. )
Suggested-by: Stephane Eranian <eranian@google.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Paul Mackerras <paulus@samba.org>
Reviewed-by: Arjan van de Ven <arjan@linux.intel.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: David Howells <dhowells@redhat.com>
Cc: Kyle McMartin <kyle@mcmartin.ca>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: <linux-arch@vger.kernel.org>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2009-09-21 03:02:48 -07:00
|
|
|
#include <asm/perf_event.h>
|
2005-04-16 15:20:36 -07:00
|
|
|
#include <asm/mmu_context.h>
|
x86/doublefault/32: Move #DF stack and TSS to cpu_entry_area
There are three problems with the current layout of the doublefault
stack and TSS. First, the TSS is only cacheline-aligned, which is
not enough -- if the hardware portion of the TSS (struct x86_hw_tss)
crosses a page boundary, horrible things happen [0]. Second, the
stack and TSS are global, so simultaneous double faults on different
CPUs will cause massive corruption. Third, the whole mechanism
won't work if user CR3 is loaded, resulting in a triple fault [1].
Let the doublefault stack and TSS share a page (which prevents the
TSS from spanning a page boundary), make it percpu, and move it into
cpu_entry_area. Teach the stack dump code about the doublefault
stack.
[0] Real hardware will read past the end of the page onto the next
*physical* page if a task switch happens. Virtual machines may
have any number of bugs, and I would consider it reasonable for
a VM to summarily kill the guest if it tries to task-switch to
a page-spanning TSS.
[1] Real hardware triple faults. At least some VMs seem to hang.
I'm not sure what's going on.
Signed-off-by: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
2019-11-26 10:27:16 -07:00
|
|
|
#include <asm/doublefault.h>
|
2011-07-31 14:02:19 -07:00
|
|
|
#include <asm/archrandom.h>
|
2009-03-13 22:49:49 -07:00
|
|
|
#include <asm/hypervisor.h>
|
|
|
|
#include <asm/processor.h>
|
2014-10-24 15:58:08 -07:00
|
|
|
#include <asm/tlbflush.h>
|
2012-01-20 14:24:09 -07:00
|
|
|
#include <asm/debugreg.h>
|
2009-03-13 22:49:49 -07:00
|
|
|
#include <asm/sections.h>
|
2014-05-05 12:19:36 -07:00
|
|
|
#include <asm/vsyscall.h>
|
2009-07-03 16:35:45 -07:00
|
|
|
#include <linux/topology.h>
|
|
|
|
#include <linux/cpumask.h>
|
2011-07-26 16:09:06 -07:00
|
|
|
#include <linux/atomic.h>
|
2009-03-13 22:49:49 -07:00
|
|
|
#include <asm/proto.h>
|
|
|
|
#include <asm/setup.h>
|
|
|
|
#include <asm/apic.h>
|
|
|
|
#include <asm/desc.h>
|
2021-10-14 18:16:39 -07:00
|
|
|
#include <asm/fpu/api.h>
|
2006-06-23 02:04:18 -07:00
|
|
|
#include <asm/mtrr.h>
|
2017-01-20 06:22:34 -07:00
|
|
|
#include <asm/hwcap2.h>
|
2009-07-03 16:35:45 -07:00
|
|
|
#include <linux/numa.h>
|
2020-08-06 05:35:11 -07:00
|
|
|
#include <asm/numa.h>
|
2009-03-13 22:49:49 -07:00
|
|
|
#include <asm/asm.h>
|
x86/mm/mpx: Work around MPX erratum SKD046
This erratum essentially causes the CPU to forget which privilege
level it is operating on (kernel vs. user) for the purposes of MPX.
This erratum can only be triggered when a system is not using
Supervisor Mode Execution Prevention (SMEP). Our workaround for
the erratum is to ensure that MPX can only be used in cases where
SMEP is present in the processor and is enabled.
This erratum only affects Core processors. Atom is unaffected.
But, there is no architectural way to determine Atom vs. Core.
So, we just apply this workaround to all processors. It's
possible that it will mistakenly disable MPX on some Atom
processsors or future unaffected Core processors. There are
currently no processors that have MPX and not SMEP. It would
take something akin to a hypervisor masking SMEP out on an Atom
processor for this to present itself on current hardware.
More details can be found at:
http://www.intel.com/content/dam/www/public/us/en/documents/specification-updates/desktop-6th-gen-core-family-spec-update.pdf
"
SKD046 Branch Instructions May Initialize MPX Bound Registers Incorrectly
Problem:
Depending on the current Intel MPX (Memory Protection
Extensions) configuration, execution of certain branch
instructions (near CALL, near RET, near JMP, and Jcc
instructions) without a BND prefix (F2H) initialize the MPX bound
registers. Due to this erratum, such a branch instruction that is
executed both with CPL = 3 and with CPL < 3 may not use the
correct MPX configuration register (BNDCFGU or BNDCFGS,
respectively) for determining whether to initialize the bound
registers; it may thus initialize the bound registers when it
should not, or fail to initialize them when it should.
Implication:
A branch instruction that has executed both in user mode and in
supervisor mode (from the same linear address) may cause a #BR
(bound range fault) when it should not have or may not cause a
#BR when it should have. Workaround An operating system can
avoid this erratum by setting CR4.SMEP[bit 20] to enable
supervisor-mode execution prevention (SMEP). When SMEP is
enabled, no code can be executed both with CPL = 3 and with CPL < 3.
"
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Dave Hansen <dave@sr71.net>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/20160512220400.3B35F1BC@viggo.jf.intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
2016-05-12 15:04:00 -07:00
|
|
|
#include <asm/bugs.h>
|
2009-03-13 22:49:49 -07:00
|
|
|
#include <asm/cpu.h>
|
2006-06-23 02:04:20 -07:00
|
|
|
#include <asm/mce.h>
|
2009-03-13 22:49:49 -07:00
|
|
|
#include <asm/msr.h>
|
2022-11-02 00:47:09 -07:00
|
|
|
#include <asm/cacheinfo.h>
|
2019-11-20 07:33:57 -07:00
|
|
|
#include <asm/memtype.h>
|
2012-12-21 00:44:23 -07:00
|
|
|
#include <asm/microcode.h>
|
2018-01-25 09:14:13 -07:00
|
|
|
#include <asm/intel-family.h>
|
|
|
|
#include <asm/cpu_device_id.h>
|
2023-12-05 03:50:24 -07:00
|
|
|
#include <asm/fred.h>
|
2009-01-21 01:26:06 -07:00
|
|
|
#include <asm/uv/uv.h>
|
2023-06-23 04:14:08 -07:00
|
|
|
#include <asm/ia32.h>
|
2023-06-13 16:39:24 -07:00
|
|
|
#include <asm/set_memory.h>
|
2022-03-08 08:30:35 -07:00
|
|
|
#include <asm/traps.h>
|
2022-02-09 11:10:11 -07:00
|
|
|
#include <asm/sev.h>
|
2023-12-08 10:07:21 -07:00
|
|
|
#include <asm/tdx.h>
|
2024-04-23 10:41:08 -07:00
|
|
|
#include <asm/posted_intr.h>
|
2024-10-23 18:17:46 -07:00
|
|
|
#include <asm/runtime-const.h>
|
2005-04-16 15:20:36 -07:00
|
|
|
|
|
|
|
#include "cpu.h"
|
|
|
|
|
2024-03-04 03:12:23 -07:00
|
|
|
/* Per-CPU CPU state/feature information; exported for module use. */
DEFINE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info);
EXPORT_PER_CPU_SYMBOL(cpu_info);
|
|
|
|
|
2017-01-20 06:22:34 -07:00
|
|
|
/* Extra hardware capability bits; presumably surfaced to userspace via the
 * ELF auxiliary vector (AT_HWCAP2) — name suggests, confirm at usage sites. */
u32 elf_hwcap2 __read_mostly;
|
|
|
|
|
2018-04-27 14:34:34 -07:00
|
|
|
/* Number of siblings per CPU package */
/*
 * All of the counters below default to 1 (the trivial topology) and are
 * __ro_after_init: written once during boot, read-only afterwards.
 * Presumably refined by topology detection — confirm at the writers.
 */
unsigned int __max_threads_per_core __ro_after_init = 1;
EXPORT_SYMBOL(__max_threads_per_core);

/* Maximum number of dies per physical package. */
unsigned int __max_dies_per_package __ro_after_init = 1;
EXPORT_SYMBOL(__max_dies_per_package);

/* Maximum number of logical packages in the system. */
unsigned int __max_logical_packages __ro_after_init = 1;
EXPORT_SYMBOL(__max_logical_packages);

/* Number of cores per physical package. */
unsigned int __num_cores_per_package __ro_after_init = 1;
EXPORT_SYMBOL(__num_cores_per_package);

/* Number of threads per physical package. */
unsigned int __num_threads_per_package __ro_after_init = 1;
EXPORT_SYMBOL(__num_threads_per_package);
|
|
|
|
|
2022-01-31 16:01:07 -07:00
|
|
|
/*
 * Per-vendor Protected Processor Identification Number (PPIN) MSR layout,
 * indexed by X86_VENDOR_*. Consumed via ppin_cpuids/ppin_init() below.
 */
static struct ppin_info {
	int feature;		/* X86_FEATURE_* bit advertising PPIN support */
	int msr_ppin_ctl;	/* control MSR (enable/lock bits, see ppin_init()) */
	int msr_ppin;		/* MSR holding the PPIN value itself */
} ppin_info[] = {
	[X86_VENDOR_INTEL] = {
		.feature = X86_FEATURE_INTEL_PPIN,
		.msr_ppin_ctl = MSR_PPIN_CTL,
		.msr_ppin = MSR_PPIN
	},
	[X86_VENDOR_AMD] = {
		.feature = X86_FEATURE_AMD_PPIN,
		.msr_ppin_ctl = MSR_AMD_PPIN_CTL,
		.msr_ppin = MSR_AMD_PPIN
	},
};
|
|
|
|
|
|
|
|
/*
 * CPUs on which a PPIN may be available. Newer parts advertise it with a
 * feature flag; older Intel server parts are matched explicitly by model.
 * driver_data points at the vendor's entry in ppin_info[].
 */
static const struct x86_cpu_id ppin_cpuids[] = {
	X86_MATCH_FEATURE(X86_FEATURE_AMD_PPIN, &ppin_info[X86_VENDOR_AMD]),
	X86_MATCH_FEATURE(X86_FEATURE_INTEL_PPIN, &ppin_info[X86_VENDOR_INTEL]),

	/* Legacy models without CPUID enumeration */
	X86_MATCH_VFM(INTEL_IVYBRIDGE_X, &ppin_info[X86_VENDOR_INTEL]),
	X86_MATCH_VFM(INTEL_HASWELL_X, &ppin_info[X86_VENDOR_INTEL]),
	X86_MATCH_VFM(INTEL_BROADWELL_D, &ppin_info[X86_VENDOR_INTEL]),
	X86_MATCH_VFM(INTEL_BROADWELL_X, &ppin_info[X86_VENDOR_INTEL]),
	X86_MATCH_VFM(INTEL_SKYLAKE_X, &ppin_info[X86_VENDOR_INTEL]),
	X86_MATCH_VFM(INTEL_ICELAKE_X, &ppin_info[X86_VENDOR_INTEL]),
	X86_MATCH_VFM(INTEL_ICELAKE_D, &ppin_info[X86_VENDOR_INTEL]),
	X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, &ppin_info[X86_VENDOR_INTEL]),
	X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X, &ppin_info[X86_VENDOR_INTEL]),
	X86_MATCH_VFM(INTEL_XEON_PHI_KNL, &ppin_info[X86_VENDOR_INTEL]),
	X86_MATCH_VFM(INTEL_XEON_PHI_KNM, &ppin_info[X86_VENDOR_INTEL]),

	{}
};
|
|
|
|
|
|
|
|
/*
 * Read this CPU's PPIN into c->ppin if the platform supports it and the
 * control MSR permits (or can be made to permit) reading it; otherwise
 * make sure the PPIN feature bit is clear for this CPU.
 *
 * Per the checks below, the control MSR uses bit 0 as a lock-out bit and
 * bit 1 as the enable bit.
 */
static void ppin_init(struct cpuinfo_x86 *c)
{
	const struct x86_cpu_id *id;
	unsigned long long val;
	struct ppin_info *info;

	id = x86_match_cpu(ppin_cpuids);
	if (!id)
		return;

	/*
	 * Testing the presence of the MSR is not enough. Need to check
	 * that the PPIN_CTL allows reading of the PPIN.
	 */
	info = (struct ppin_info *)id->driver_data;

	/* MSR not actually implemented (read faulted): no PPIN here. */
	if (rdmsrl_safe(info->msr_ppin_ctl, &val))
		goto clear_ppin;

	if ((val & 3UL) == 1UL) {
		/* PPIN locked in disabled mode */
		goto clear_ppin;
	}

	/* If PPIN is disabled, try to enable */
	if (!(val & 2UL)) {
		wrmsrl_safe(info->msr_ppin_ctl, val | 2UL);
		/* Re-read: the write may not stick (e.g. locked by firmware). */
		rdmsrl_safe(info->msr_ppin_ctl, &val);
	}

	/* Is the enable bit set? */
	if (val & 2UL) {
		c->ppin = __rdmsr(info->msr_ppin);
		set_cpu_cap(c, info->feature);
		return;
	}

clear_ppin:
	clear_cpu_cap(c, info->feature);
}
|
|
|
|
|
x86: delete __cpuinit usage from all x86 files
The __cpuinit type of throwaway sections might have made sense
some time ago when RAM was more constrained, but now the savings
do not offset the cost and complications. For example, the fix in
commit 5e427ec2d0 ("x86: Fix bit corruption at CPU resume time")
is a good example of the nasty type of bugs that can be created
with improper use of the various __init prefixes.
After a discussion on LKML[1] it was decided that cpuinit should go
the way of devinit and be phased out. Once all the users are gone,
we can then finally remove the macros themselves from linux/init.h.
Note that some harmless section mismatch warnings may result, since
notify_cpu_starting() and cpu_up() are arch independent (kernel/cpu.c)
are flagged as __cpuinit -- so if we remove the __cpuinit from
arch specific callers, we will also get section mismatch warnings.
As an intermediate step, we intend to turn the linux/init.h cpuinit
content into no-ops as early as possible, since that will get rid
of these warnings. In any case, they are temporary and harmless.
This removes all the arch/x86 uses of the __cpuinit macros from
all C files. x86 only had the one __CPUINIT used in assembly files,
and it wasn't paired off with a .previous or a __FINIT, so we can
delete it directly w/o any corresponding additional change there.
[1] https://lkml.org/lkml/2013/5/20/589
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: x86@kernel.org
Acked-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: H. Peter Anvin <hpa@linux.intel.com>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
2013-06-18 15:23:59 -07:00
|
|
|
static void default_init(struct cpuinfo_x86 *c)
|
2009-08-11 11:00:11 -07:00
|
|
|
{
|
|
|
|
#ifdef CONFIG_X86_64
|
2009-11-21 06:01:45 -07:00
|
|
|
cpu_detect_cache_sizes(c);
|
2009-08-11 11:00:11 -07:00
|
|
|
#else
|
|
|
|
/* Not much we can do here... */
|
|
|
|
/* Check if at least it has cpuid */
|
|
|
|
if (c->cpuid_level == -1) {
|
|
|
|
/* No cpuid. It must be an ancient CPU */
|
|
|
|
if (c->x86 == 4)
|
|
|
|
strcpy(c->x86_model_id, "486");
|
|
|
|
else if (c->x86 == 3)
|
|
|
|
strcpy(c->x86_model_id, "386");
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
x86: delete __cpuinit usage from all x86 files
The __cpuinit type of throwaway sections might have made sense
some time ago when RAM was more constrained, but now the savings
do not offset the cost and complications. For example, the fix in
commit 5e427ec2d0 ("x86: Fix bit corruption at CPU resume time")
is a good example of the nasty type of bugs that can be created
with improper use of the various __init prefixes.
After a discussion on LKML[1] it was decided that cpuinit should go
the way of devinit and be phased out. Once all the users are gone,
we can then finally remove the macros themselves from linux/init.h.
Note that some harmless section mismatch warnings may result, since
notify_cpu_starting() and cpu_up() are arch independent (kernel/cpu.c)
are flagged as __cpuinit -- so if we remove the __cpuinit from
arch specific callers, we will also get section mismatch warnings.
As an intermediate step, we intend to turn the linux/init.h cpuinit
content into no-ops as early as possible, since that will get rid
of these warnings. In any case, they are temporary and harmless.
This removes all the arch/x86 uses of the __cpuinit macros from
all C files. x86 only had the one __CPUINIT used in assembly files,
and it wasn't paired off with a .previous or a __FINIT, so we can
delete it directly w/o any corresponding additional change there.
[1] https://lkml.org/lkml/2013/5/20/589
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: x86@kernel.org
Acked-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: H. Peter Anvin <hpa@linux.intel.com>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
2013-06-18 15:23:59 -07:00
|
|
|
/*
 * Generic fallback cpu_dev: used until/unless a vendor-specific driver
 * takes over (see this_cpu below). Only supplies default_init().
 */
static const struct cpu_dev default_cpu = {
	.c_init = default_init,
	.c_vendor = "Unknown",
	.c_x86_vendor = X86_VENDOR_UNKNOWN,
};
|
|
|
|
|
x86: delete __cpuinit usage from all x86 files
The __cpuinit type of throwaway sections might have made sense
some time ago when RAM was more constrained, but now the savings
do not offset the cost and complications. For example, the fix in
commit 5e427ec2d0 ("x86: Fix bit corruption at CPU resume time")
is a good example of the nasty type of bugs that can be created
with improper use of the various __init prefixes.
After a discussion on LKML[1] it was decided that cpuinit should go
the way of devinit and be phased out. Once all the users are gone,
we can then finally remove the macros themselves from linux/init.h.
Note that some harmless section mismatch warnings may result, since
notify_cpu_starting() and cpu_up() are arch independent (kernel/cpu.c)
are flagged as __cpuinit -- so if we remove the __cpuinit from
arch specific callers, we will also get section mismatch warnings.
As an intermediate step, we intend to turn the linux/init.h cpuinit
content into no-ops as early as possible, since that will get rid
of these warnings. In any case, they are temporary and harmless.
This removes all the arch/x86 uses of the __cpuinit macros from
all C files. x86 only had the one __CPUINIT used in assembly files,
and it wasn't paired off with a .previous or a __FINIT, so we can
delete it directly w/o any corresponding additional change there.
[1] https://lkml.org/lkml/2013/5/20/589
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: x86@kernel.org
Acked-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: H. Peter Anvin <hpa@linux.intel.com>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
2013-06-18 15:23:59 -07:00
|
|
|
/* The currently-selected CPU vendor driver; starts at the generic fallback. */
static const struct cpu_dev *this_cpu = &default_cpu;
|
2008-09-04 12:09:47 -07:00
|
|
|
|
2009-01-21 01:26:05 -07:00
|
|
|
/* Per-CPU boot GDT, page-aligned (wrapped in struct gdt_page). */
DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
#ifdef CONFIG_X86_64
	/*
	 * We need valid kernel segments for data and code in long mode too
	 * IRET will check the segment types  kkeil 2000/10/28
	 * Also sysret mandates a special GDT layout
	 *
	 * TLS descriptors are currently at a different place compared to i386.
	 * Hopefully nobody expects them at a fixed place (Wine?)
	 */
	[GDT_ENTRY_KERNEL32_CS]		= GDT_ENTRY_INIT(DESC_CODE32, 0, 0xfffff),
	[GDT_ENTRY_KERNEL_CS]		= GDT_ENTRY_INIT(DESC_CODE64, 0, 0xfffff),
	[GDT_ENTRY_KERNEL_DS]		= GDT_ENTRY_INIT(DESC_DATA64, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER32_CS]	= GDT_ENTRY_INIT(DESC_CODE32 | DESC_USER, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER_DS]	= GDT_ENTRY_INIT(DESC_DATA64 | DESC_USER, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER_CS]	= GDT_ENTRY_INIT(DESC_CODE64 | DESC_USER, 0, 0xfffff),
#else
	[GDT_ENTRY_KERNEL_CS]		= GDT_ENTRY_INIT(DESC_CODE32, 0, 0xfffff),
	[GDT_ENTRY_KERNEL_DS]		= GDT_ENTRY_INIT(DESC_DATA32, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER_CS]	= GDT_ENTRY_INIT(DESC_CODE32 | DESC_USER, 0, 0xfffff),
	[GDT_ENTRY_DEFAULT_USER_DS]	= GDT_ENTRY_INIT(DESC_DATA32 | DESC_USER, 0, 0xfffff),
	/*
	 * Segments used for calling PnP BIOS have byte granularity.
	 * They code segments and data segments have fixed 64k limits,
	 * the transfer segment sizes are set at run time.
	 */
	[GDT_ENTRY_PNPBIOS_CS32]	= GDT_ENTRY_INIT(DESC_CODE32_BIOS, 0, 0xffff),
	[GDT_ENTRY_PNPBIOS_CS16]	= GDT_ENTRY_INIT(DESC_CODE16, 0, 0xffff),
	[GDT_ENTRY_PNPBIOS_DS]		= GDT_ENTRY_INIT(DESC_DATA16, 0, 0xffff),
	[GDT_ENTRY_PNPBIOS_TS1]		= GDT_ENTRY_INIT(DESC_DATA16, 0, 0),
	[GDT_ENTRY_PNPBIOS_TS2]		= GDT_ENTRY_INIT(DESC_DATA16, 0, 0),
	/*
	 * The APM segments have byte granularity and their bases
	 * are set at run time.  All have 64k limits.
	 */
	[GDT_ENTRY_APMBIOS_BASE]	= GDT_ENTRY_INIT(DESC_CODE32_BIOS, 0, 0xffff),
	[GDT_ENTRY_APMBIOS_BASE+1]	= GDT_ENTRY_INIT(DESC_CODE16, 0, 0xffff),
	[GDT_ENTRY_APMBIOS_BASE+2]	= GDT_ENTRY_INIT(DESC_DATA32_BIOS, 0, 0xffff),

	[GDT_ENTRY_ESPFIX_SS]		= GDT_ENTRY_INIT(DESC_DATA32, 0, 0xfffff),
	[GDT_ENTRY_PERCPU]		= GDT_ENTRY_INIT(DESC_DATA32, 0, 0xfffff),
#endif
} };
EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
|
2007-05-02 10:27:10 -07:00
|
|
|
|
2017-06-29 08:53:20 -07:00
|
|
|
#ifdef CONFIG_X86_64
|
2017-09-10 17:48:27 -07:00
|
|
|
static int __init x86_nopcid_setup(char *s)
|
2017-06-29 08:53:20 -07:00
|
|
|
{
|
2017-09-10 17:48:27 -07:00
|
|
|
/* nopcid doesn't accept parameters */
|
|
|
|
if (s)
|
|
|
|
return -EINVAL;
|
2017-06-29 08:53:20 -07:00
|
|
|
|
|
|
|
/* do not emit a message if the feature is not present */
|
|
|
|
if (!boot_cpu_has(X86_FEATURE_PCID))
|
2017-09-10 17:48:27 -07:00
|
|
|
return 0;
|
2017-06-29 08:53:20 -07:00
|
|
|
|
|
|
|
setup_clear_cpu_cap(X86_FEATURE_PCID);
|
|
|
|
pr_info("nopcid: PCID feature disabled\n");
|
2017-09-10 17:48:27 -07:00
|
|
|
return 0;
|
2017-06-29 08:53:20 -07:00
|
|
|
}
|
2017-09-10 17:48:27 -07:00
|
|
|
early_param("nopcid", x86_nopcid_setup);
|
2017-06-29 08:53:20 -07:00
|
|
|
#endif
|
|
|
|
|
2016-01-29 12:42:58 -07:00
|
|
|
static int __init x86_noinvpcid_setup(char *s)
|
|
|
|
{
|
|
|
|
/* noinvpcid doesn't accept parameters */
|
|
|
|
if (s)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
/* do not emit a message if the feature is not present */
|
|
|
|
if (!boot_cpu_has(X86_FEATURE_INVPCID))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
setup_clear_cpu_cap(X86_FEATURE_INVPCID);
|
|
|
|
pr_info("noinvpcid: INVPCID feature disabled\n");
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
early_param("noinvpcid", x86_noinvpcid_setup);
|
|
|
|
|
2008-09-04 20:09:02 -07:00
|
|
|
#ifdef CONFIG_X86_32
|
x86: delete __cpuinit usage from all x86 files
The __cpuinit type of throwaway sections might have made sense
some time ago when RAM was more constrained, but now the savings
do not offset the cost and complications. For example, the fix in
commit 5e427ec2d0 ("x86: Fix bit corruption at CPU resume time")
is a good example of the nasty type of bugs that can be created
with improper use of the various __init prefixes.
After a discussion on LKML[1] it was decided that cpuinit should go
the way of devinit and be phased out. Once all the users are gone,
we can then finally remove the macros themselves from linux/init.h.
Note that some harmless section mismatch warnings may result, since
notify_cpu_starting() and cpu_up() are arch independent (kernel/cpu.c)
are flagged as __cpuinit -- so if we remove the __cpuinit from
arch specific callers, we will also get section mismatch warnings.
As an intermediate step, we intend to turn the linux/init.h cpuinit
content into no-ops as early as possible, since that will get rid
of these warnings. In any case, they are temporary and harmless.
This removes all the arch/x86 uses of the __cpuinit macros from
all C files. x86 only had the one __CPUINIT used in assembly files,
and it wasn't paired off with a .previous or a __FINIT, so we can
delete it directly w/o any corresponding additional change there.
[1] https://lkml.org/lkml/2013/5/20/589
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: x86@kernel.org
Acked-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: H. Peter Anvin <hpa@linux.intel.com>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
2013-06-18 15:23:59 -07:00
|
|
|
/* Cache-size override from "cachesize=" (-1 = no override); see cachesize_setup(). */
static int cachesize_override = -1;
/* Non-zero: hide the CPU serial number — presumably consulted by
 * squash_the_stupid_serial_number() below; confirm at usage sites. */
static int disable_x86_serial_nr = 1;
|
2005-04-16 15:20:36 -07:00
|
|
|
|
2008-09-04 12:09:47 -07:00
|
|
|
/* Parse the numeric "cachesize=" boot argument into cachesize_override. */
static int __init cachesize_setup(char *str)
{
	get_option(&str, &cachesize_override);
	return 1;	/* option consumed */
}
__setup("cachesize=", cachesize_setup);
|
|
|
|
|
|
|
|
/* Standard macro to see if a specific flag is changeable */
/*
 * Save EFLAGS, toggle the requested bit, write it back, re-read EFLAGS and
 * restore the original. The bit is "changeable" iff the toggled value stuck
 * (f1 differs from f2 in that bit). 32-bit only (pushfl/popfl).
 */
static inline int flag_is_changeable_p(u32 flag)
{
	u32 f1, f2;

	/*
	 * Cyrix and IDT cpus allow disabling of CPUID
	 * so the code below may return different results
	 * when it is executed before and after enabling
	 * the CPUID. Add "volatile" to not allow gcc to
	 * optimize the subsequent calls to this function.
	 */
	asm volatile ("pushfl \n\t"
		      "pushfl \n\t"
		      "popl %0 \n\t"
		      "movl %0, %1 \n\t"
		      "xorl %2, %0 \n\t"
		      "pushl %0 \n\t"
		      "popfl \n\t"
		      "pushfl \n\t"
		      "popl %0 \n\t"
		      "popfl \n\t"

		      : "=&r" (f1), "=&r" (f2)
		      : "ir" (flag));

	return ((f1^f2) & flag) != 0;
}
|
|
|
|
|
|
|
|
/* Probe for the CPUID instruction */
|
x86: delete __cpuinit usage from all x86 files
The __cpuinit type of throwaway sections might have made sense
some time ago when RAM was more constrained, but now the savings
do not offset the cost and complications. For example, the fix in
commit 5e427ec2d0 ("x86: Fix bit corruption at CPU resume time")
is a good example of the nasty type of bugs that can be created
with improper use of the various __init prefixes.
After a discussion on LKML[1] it was decided that cpuinit should go
the way of devinit and be phased out. Once all the users are gone,
we can then finally remove the macros themselves from linux/init.h.
Note that some harmless section mismatch warnings may result, since
notify_cpu_starting() and cpu_up() are arch independent (kernel/cpu.c)
are flagged as __cpuinit -- so if we remove the __cpuinit from
arch specific callers, we will also get section mismatch warnings.
As an intermediate step, we intend to turn the linux/init.h cpuinit
content into no-ops as early as possible, since that will get rid
of these warnings. In any case, they are temporary and harmless.
This removes all the arch/x86 uses of the __cpuinit macros from
all C files. x86 only had the one __CPUINIT used in assembly files,
and it wasn't paired off with a .previous or a __FINIT, so we can
delete it directly w/o any corresponding additional change there.
[1] https://lkml.org/lkml/2013/5/20/589
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: x86@kernel.org
Acked-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: H. Peter Anvin <hpa@linux.intel.com>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
2013-06-18 15:23:59 -07:00
|
|
|
/*
 * Report whether the CPUID instruction is available: on 32-bit parts this
 * is detected by testing whether EFLAGS.ID can be toggled.
 */
int have_cpuid_p(void)
{
	return flag_is_changeable_p(X86_EFLAGS_ID);
}
|
|
|
|
|
x86: delete __cpuinit usage from all x86 files
The __cpuinit type of throwaway sections might have made sense
some time ago when RAM was more constrained, but now the savings
do not offset the cost and complications. For example, the fix in
commit 5e427ec2d0 ("x86: Fix bit corruption at CPU resume time")
is a good example of the nasty type of bugs that can be created
with improper use of the various __init prefixes.
After a discussion on LKML[1] it was decided that cpuinit should go
the way of devinit and be phased out. Once all the users are gone,
we can then finally remove the macros themselves from linux/init.h.
Note that some harmless section mismatch warnings may result, since
notify_cpu_starting() and cpu_up() are arch independent (kernel/cpu.c)
are flagged as __cpuinit -- so if we remove the __cpuinit from
arch specific callers, we will also get section mismatch warnings.
As an intermediate step, we intend to turn the linux/init.h cpuinit
content into no-ops as early as possible, since that will get rid
of these warnings. In any case, they are temporary and harmless.
This removes all the arch/x86 uses of the __cpuinit macros from
all C files. x86 only had the one __CPUINIT used in assembly files,
and it wasn't paired off with a .previous or a __FINIT, so we can
delete it directly w/o any corresponding additional change there.
[1] https://lkml.org/lkml/2013/5/20/589
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: x86@kernel.org
Acked-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: H. Peter Anvin <hpa@linux.intel.com>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
2013-06-18 15:23:59 -07:00
|
|
|
/*
 * Disable the processor serial number (PSN) feature unless the user asked
 * to keep it ("serialnumber" boot option clears disable_x86_serial_nr).
 * No-op when the CPU does not advertise X86_FEATURE_PN.
 */
static void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
	unsigned long lo, hi;

	if (!cpu_has(c, X86_FEATURE_PN) || !disable_x86_serial_nr)
		return;

	/* Disable processor serial number: set bit 21 in the control MSR. */
	rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
	lo |= 0x200000;
	wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);

	pr_notice("CPU serial number disabled.\n");
	/* Reflect the disable in this CPU's feature bits. */
	clear_cpu_cap(c, X86_FEATURE_PN);

	/* Disabling the serial number may affect the cpuid level */
	c->cpuid_level = cpuid_eax(0);
}
|
|
|
|
|
|
|
|
/*
 * "serialnumber" boot parameter: keep the processor serial number enabled
 * by clearing the default disable flag. Always returns 1 (option consumed).
 */
static int __init x86_serial_nr_setup(char *s)
{
	disable_x86_serial_nr = 0;
	return 1;
}
__setup("serialnumber", x86_serial_nr_setup);
|
2008-09-04 20:09:02 -07:00
|
|
|
#else
|
2008-09-04 20:09:13 -07:00
|
|
|
/* On this configuration EFLAGS.ID is always toggleable: CPUID exists. */
static inline int flag_is_changeable_p(u32 flag)
{
	return 1;
}
|
|
|
|
/* No PSN handling needed on this configuration. */
static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
}
|
2008-09-04 20:09:02 -07:00
|
|
|
#endif
|
2008-09-04 12:09:47 -07:00
|
|
|
|
2012-09-26 18:02:28 -07:00
|
|
|
/* Enable Supervisor Mode Execution Prevention (CR4.SMEP) if supported. */
static __always_inline void setup_smep(struct cpuinfo_x86 *c)
{
	if (cpu_has(c, X86_FEATURE_SMEP))
		cr4_set_bits(X86_CR4_SMEP);
}
|
|
|
|
|
2012-09-26 18:02:28 -07:00
|
|
|
/* Enable Supervisor Mode Access Prevention (CR4.SMAP) if supported. */
static __always_inline void setup_smap(struct cpuinfo_x86 *c)
{
	unsigned long eflags = native_save_fl();

	/*
	 * This should have been cleared long ago: with SMAP about to be
	 * enabled, EFLAGS.AC must not be set at this point.
	 */
	BUG_ON(eflags & X86_EFLAGS_AC);

	if (cpu_has(c, X86_FEATURE_SMAP))
		cr4_set_bits(X86_CR4_SMAP);
}
|
|
|
|
|
2017-11-05 19:27:54 -07:00
|
|
|
/*
 * Enable User Mode Instruction Prevention (CR4.UMIP) when both the build
 * configuration and this CPU support it; otherwise make sure the bit is
 * cleared (it may have been left set by a previous kernel, e.g. kexec).
 */
static __always_inline void setup_umip(struct cpuinfo_x86 *c)
{
	/* Check the boot processor, plus build option for UMIP. */
	if (!cpu_feature_enabled(X86_FEATURE_UMIP))
		goto out;

	/* Check the current processor's cpuid bits. */
	if (!cpu_has(c, X86_FEATURE_UMIP))
		goto out;

	cr4_set_bits(X86_CR4_UMIP);

	pr_info_once("x86/cpu: User Mode Instruction Prevention (UMIP) activated\n");

	return;

out:
	/*
	 * Make sure UMIP is disabled in case it was enabled in a
	 * previous boot (e.g., via kexec).
	 */
	cr4_clear_bits(X86_CR4_UMIP);
}
|
|
|
|
|
2020-06-08 20:15:09 -07:00
|
|
|
/* These bits should not change their value after CPU init is finished. */
static const unsigned long cr4_pinned_mask = X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_UMIP |
	X86_CR4_FSGSBASE | X86_CR4_CET | X86_CR4_FRED;
/* Enabled once boot-time feature detection is done (see setup_cr_pinning()). */
static DEFINE_STATIC_KEY_FALSE_RO(cr_pinning);
/* Snapshot of the pinned-mask bits taken at pinning time. */
static unsigned long cr4_pinned_bits __ro_after_init;
|
|
|
|
|
|
|
|
/*
 * Write CR0, enforcing that the WP (write-protect) bit stays set once CR
 * pinning is active. If a caller tries to clear WP, it is re-set by
 * re-issuing the write, and a warning is emitted afterwards.
 */
void native_write_cr0(unsigned long val)
{
	unsigned long bits_missing = 0;

set_register:
	/* "memory" clobber: CR0 writes change memory access semantics. */
	asm volatile("mov %0,%%cr0": "+r" (val) : : "memory");

	if (static_branch_likely(&cr_pinning)) {
		if (unlikely((val & X86_CR0_WP) != X86_CR0_WP)) {
			bits_missing = X86_CR0_WP;
			val |= bits_missing;
			/* Repair the register before warning. */
			goto set_register;
		}
		/* Warn after we've set the missing bits. */
		WARN_ONCE(bits_missing, "CR0 WP bit went missing!?\n");
	}
}
EXPORT_SYMBOL(native_write_cr0);
|
|
|
|
|
2021-12-17 08:48:29 -07:00
|
|
|
/*
 * Write CR4, enforcing that all pinned bits (cr4_pinned_mask) retain their
 * recorded values once CR pinning is active. A write that would change a
 * pinned bit is corrected by re-issuing the write, then warned about.
 */
void __no_profile native_write_cr4(unsigned long val)
{
	unsigned long bits_changed = 0;

set_register:
	/* "memory" clobber: CR4 writes change memory access semantics. */
	asm volatile("mov %0,%%cr4": "+r" (val) : : "memory");

	if (static_branch_likely(&cr_pinning)) {
		if (unlikely((val & cr4_pinned_mask) != cr4_pinned_bits)) {
			bits_changed = (val & cr4_pinned_mask) ^ cr4_pinned_bits;
			/* Restore the pinned bits to their recorded values. */
			val = (val & ~cr4_pinned_mask) | cr4_pinned_bits;
			goto set_register;
		}
		/* Warn after we've corrected the changed bits. */
		WARN_ONCE(bits_changed, "pinned CR4 bits changed: 0x%lx!?\n",
			  bits_changed);
	}
}
#if IS_MODULE(CONFIG_LKDTM)
EXPORT_SYMBOL_GPL(native_write_cr4);
#endif
|
2020-04-21 02:20:29 -07:00
|
|
|
|
|
|
|
/*
 * Update CR4 with the given set/clear masks, keeping the per-CPU shadow
 * copy in sync. Must be called with interrupts disabled; the hardware
 * register is only touched when the value actually changes.
 */
void cr4_update_irqsoff(unsigned long set, unsigned long clear)
{
	unsigned long newval, cr4 = this_cpu_read(cpu_tlbstate.cr4);

	lockdep_assert_irqs_disabled();

	newval = (cr4 & ~clear) | set;
	if (newval != cr4) {
		/* Update the shadow first, then the real register. */
		this_cpu_write(cpu_tlbstate.cr4, newval);
		__write_cr4(newval);
	}
}
EXPORT_SYMBOL(cr4_update_irqsoff);
|
|
|
|
|
|
|
|
/* Read the CR4 shadow: the per-CPU cached copy, avoiding a real CR4 read. */
unsigned long cr4_read_shadow(void)
{
	return this_cpu_read(cpu_tlbstate.cr4);
}
EXPORT_SYMBOL_GPL(cr4_read_shadow);
|
2019-07-10 12:42:46 -07:00
|
|
|
|
|
|
|
/*
 * Initialize CR4 (and its per-CPU shadow) for a CPU coming up: enable PCID
 * when available and, if pinning is already active, force the pinned bits
 * to their recorded values before writing the register.
 */
void cr4_init(void)
{
	unsigned long cr4 = __read_cr4();

	if (boot_cpu_has(X86_FEATURE_PCID))
		cr4 |= X86_CR4_PCIDE;
	if (static_branch_likely(&cr_pinning))
		cr4 = (cr4 & ~cr4_pinned_mask) | cr4_pinned_bits;

	__write_cr4(cr4);

	/* Initialize cr4 shadow for this CPU. */
	this_cpu_write(cpu_tlbstate.cr4, cr4);
}
|
2019-06-17 21:55:02 -07:00
|
|
|
|
|
|
|
/*
 * Once CPU feature detection is finished (and boot params have been
 * parsed), record any of the sensitive CR bits that are set, and
 * enable CR pinning.
 */
static void __init setup_cr_pinning(void)
{
	/* Snapshot the current pinned-mask bits from the boot CPU's shadow. */
	cr4_pinned_bits = this_cpu_read(cpu_tlbstate.cr4) & cr4_pinned_mask;
	static_key_enable(&cr_pinning.key);
}
|
|
|
|
|
2020-05-28 13:13:58 -07:00
|
|
|
/*
 * "nofsgsbase" boot parameter: disable the FSGSBASE instructions.
 * Returns 0 (not handled) for any argument with trailing characters,
 * 1 otherwise.
 */
static __init int x86_nofsgsbase_setup(char *arg)
{
	/* Require an exact match without trailing characters. */
	if (strlen(arg))
		return 0;

	/* Do not emit a message if the feature is not present. */
	if (!boot_cpu_has(X86_FEATURE_FSGSBASE))
		return 1;

	setup_clear_cpu_cap(X86_FEATURE_FSGSBASE);
	pr_info("FSGSBASE disabled via kernel command line\n");
	return 1;
}
__setup("nofsgsbase", x86_nofsgsbase_setup);
|
2020-05-28 13:13:48 -07:00
|
|
|
|
2016-02-12 14:02:29 -07:00
|
|
|
/*
 * Protection Keys are not available in 32-bit mode.
 */
static bool pku_disabled;

/*
 * Enable Memory Protection Keys (CR4.PKE) and load the default PKRU value.
 * On the boot CPU this also decides the policy: bail if PKU is absent or
 * disabled on the command line, otherwise force OSPKE. Secondary CPUs just
 * follow the boot CPU's OSPKE decision.
 */
static __always_inline void setup_pku(struct cpuinfo_x86 *c)
{
	if (c == &boot_cpu_data) {
		if (pku_disabled || !cpu_feature_enabled(X86_FEATURE_PKU))
			return;
		/*
		 * Setting CR4.PKE will cause the X86_FEATURE_OSPKE cpuid
		 * bit to be set. Enforce it.
		 */
		setup_force_cpu_cap(X86_FEATURE_OSPKE);

	} else if (!cpu_feature_enabled(X86_FEATURE_OSPKE)) {
		return;
	}

	cr4_set_bits(X86_CR4_PKE);
	/* Load the default PKRU value */
	pkru_write_default();
}
|
|
|
|
|
|
|
|
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
|
|
|
|
static __init int setup_disable_pku(char *arg)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* Do not clear the X86_FEATURE_PKU bit. All of the
|
|
|
|
* runtime checks are against OSPKE so clearing the
|
|
|
|
* bit does nothing.
|
|
|
|
*
|
|
|
|
* This way, we will see "pku" in cpuinfo, but not
|
|
|
|
* "ospke", which is exactly what we want. It shows
|
|
|
|
* that the CPU has PKU, but the OS has not enabled it.
|
|
|
|
* This happens to be exactly how a system would look
|
|
|
|
* if we disabled the config option.
|
|
|
|
*/
|
|
|
|
pr_info("x86: 'nopku' specified, disabling Memory Protection Keys\n");
|
|
|
|
pku_disabled = true;
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
__setup("nopku", setup_disable_pku);
|
2023-01-13 06:01:26 -07:00
|
|
|
#endif
|
2016-02-12 14:02:29 -07:00
|
|
|
|
2022-03-08 08:30:38 -07:00
|
|
|
#ifdef CONFIG_X86_KERNEL_IBT
|
|
|
|
|
2023-02-06 05:33:07 -07:00
|
|
|
/*
 * Save the current MSR_IA32_S_CET value and, if @disable is set, turn off
 * kernel IBT (ENDBR enforcement). Returns the saved MSR value (0 when IBT
 * is not enabled) for a later ibt_restore().
 */
__noendbr u64 ibt_save(bool disable)
{
	u64 msr = 0;

	if (cpu_feature_enabled(X86_FEATURE_IBT)) {
		rdmsrl(MSR_IA32_S_CET, msr);
		if (disable)
			wrmsrl(MSR_IA32_S_CET, msr & ~CET_ENDBR_EN);
	}

	return msr;
}
|
|
|
|
|
|
|
|
/*
 * Restore the ENDBR-enable state previously captured by ibt_save(): only
 * the CET_ENDBR_EN bit is taken from @save; all other bits of
 * MSR_IA32_S_CET keep their current values.
 */
__noendbr void ibt_restore(u64 save)
{
	u64 val;

	if (!cpu_feature_enabled(X86_FEATURE_IBT))
		return;

	rdmsrl(MSR_IA32_S_CET, val);
	/* Splice the saved ENDBR bit into the live MSR value. */
	val = (val & ~CET_ENDBR_EN) | (save & CET_ENDBR_EN);
	wrmsrl(MSR_IA32_S_CET, val);
}
|
|
|
|
|
|
|
|
#endif
|
|
|
|
|
2022-03-08 08:30:35 -07:00
|
|
|
/*
 * Enable Control-flow Enforcement Technology for this CPU: kernel IBT
 * (via MSR_IA32_S_CET) and/or user shadow stack, then CR4.CET. Kernel IBT
 * is self-tested; on failure it is switched off again and the feature bit
 * cleared.
 */
static __always_inline void setup_cet(struct cpuinfo_x86 *c)
{
	bool user_shstk, kernel_ibt;

	if (!IS_ENABLED(CONFIG_X86_CET))
		return;

	kernel_ibt = HAS_KERNEL_IBT && cpu_feature_enabled(X86_FEATURE_IBT);
	user_shstk = cpu_feature_enabled(X86_FEATURE_SHSTK) &&
		     IS_ENABLED(CONFIG_X86_USER_SHADOW_STACK);

	if (!kernel_ibt && !user_shstk)
		return;

	if (user_shstk)
		set_cpu_cap(c, X86_FEATURE_USER_SHSTK);

	/* Program supervisor CET state before enabling CR4.CET. */
	if (kernel_ibt)
		wrmsrl(MSR_IA32_S_CET, CET_ENDBR_EN);
	else
		wrmsrl(MSR_IA32_S_CET, 0);

	cr4_set_bits(X86_CR4_CET);

	if (kernel_ibt && ibt_selftest()) {
		pr_err("IBT selftest: Failed!\n");
		/* Back out: disable ENDBR enforcement and the feature bit. */
		wrmsrl(MSR_IA32_S_CET, 0);
		setup_clear_cpu_cap(X86_FEATURE_IBT);
	}
}
|
|
|
|
|
2022-03-08 08:30:36 -07:00
|
|
|
/*
 * Fully disable CET by zeroing both the supervisor and user CET MSRs.
 * No-op when neither IBT nor shadow stack is enabled.
 */
__noendbr void cet_disable(void)
{
	if (!(cpu_feature_enabled(X86_FEATURE_IBT) ||
	      cpu_feature_enabled(X86_FEATURE_SHSTK)))
		return;

	wrmsrl(MSR_IA32_S_CET, 0);
	wrmsrl(MSR_IA32_U_CET, 0);
}
|
|
|
|
|
2009-01-23 18:20:50 -07:00
|
|
|
/*
 * Some CPU features depend on higher CPUID levels, which may not always
 * be available due to CPUID level capping or broken virtualization
 * software. Add those features to this table to auto-disable them.
 */
struct cpuid_dependent_feature {
	u32 feature;	/* X86_FEATURE_* bit */
	u32 level;	/* minimum CPUID level required for the feature */
};

/* Zero-terminated table consumed by filter_cpuid_features(). */
static const struct cpuid_dependent_feature
cpuid_dependent_features[] = {
	{ X86_FEATURE_MWAIT,		0x00000005 },
	{ X86_FEATURE_DCA,		0x00000009 },
	{ X86_FEATURE_XSAVE,		0x0000000d },
	{ 0, 0 }
};
|
|
|
|
|
x86: delete __cpuinit usage from all x86 files
The __cpuinit type of throwaway sections might have made sense
some time ago when RAM was more constrained, but now the savings
do not offset the cost and complications. For example, the fix in
commit 5e427ec2d0 ("x86: Fix bit corruption at CPU resume time")
is a good example of the nasty type of bugs that can be created
with improper use of the various __init prefixes.
After a discussion on LKML[1] it was decided that cpuinit should go
the way of devinit and be phased out. Once all the users are gone,
we can then finally remove the macros themselves from linux/init.h.
Note that some harmless section mismatch warnings may result, since
notify_cpu_starting() and cpu_up() are arch independent (kernel/cpu.c)
are flagged as __cpuinit -- so if we remove the __cpuinit from
arch specific callers, we will also get section mismatch warnings.
As an intermediate step, we intend to turn the linux/init.h cpuinit
content into no-ops as early as possible, since that will get rid
of these warnings. In any case, they are temporary and harmless.
This removes all the arch/x86 uses of the __cpuinit macros from
all C files. x86 only had the one __CPUINIT used in assembly files,
and it wasn't paired off with a .previous or a __FINIT, so we can
delete it directly w/o any corresponding additional change there.
[1] https://lkml.org/lkml/2013/5/20/589
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: x86@kernel.org
Acked-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: H. Peter Anvin <hpa@linux.intel.com>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
2013-06-18 15:23:59 -07:00
|
|
|
/*
 * Clear feature bits whose required CPUID level is not actually reported
 * by this CPU (see cpuid_dependent_features[] above). When @warn is set,
 * each disabled feature is logged.
 */
static void filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
{
	const struct cpuid_dependent_feature *df;

	for (df = cpuid_dependent_features; df->feature; df++) {

		if (!cpu_has(c, df->feature))
			continue;
		/*
		 * Note: cpuid_level is set to -1 if unavailable, but
		 * extended_cpuid_level is set to 0 if unavailable
		 * and the legitimate extended levels are all negative
		 * when signed; hence the weird messing around with
		 * signs here...
		 */
		if (!((s32)df->level < 0 ?
		     (u32)df->level > (u32)c->extended_cpuid_level :
		     (s32)df->level > (s32)c->cpuid_level))
			continue;

		clear_cpu_cap(c, df->feature);
		if (!warn)
			continue;

		pr_warn("CPU: CPU feature " X86_CAP_FMT " disabled, no CPUID level 0x%x\n",
			x86_cap_flag(df->feature), df->level);
	}
}
|
2009-01-23 18:20:50 -07:00
|
|
|
|
2008-09-04 20:09:13 -07:00
|
|
|
/*
|
|
|
|
* Naming convention should be: <Name> [(<Codename>)]
|
|
|
|
* This table only is used unless init_<vendor>() below doesn't set it;
|
2009-03-14 00:46:17 -07:00
|
|
|
* in particular, if CPUID levels 0x80000002..4 are supported, this
|
|
|
|
* isn't used
|
2008-09-04 20:09:13 -07:00
|
|
|
*/
|
|
|
|
|
|
|
|
/* Look up CPU names by table lookup. */
|
x86: delete __cpuinit usage from all x86 files
The __cpuinit type of throwaway sections might have made sense
some time ago when RAM was more constrained, but now the savings
do not offset the cost and complications. For example, the fix in
commit 5e427ec2d0 ("x86: Fix bit corruption at CPU resume time")
is a good example of the nasty type of bugs that can be created
with improper use of the various __init prefixes.
After a discussion on LKML[1] it was decided that cpuinit should go
the way of devinit and be phased out. Once all the users are gone,
we can then finally remove the macros themselves from linux/init.h.
Note that some harmless section mismatch warnings may result, since
notify_cpu_starting() and cpu_up() are arch independent (kernel/cpu.c)
are flagged as __cpuinit -- so if we remove the __cpuinit from
arch specific callers, we will also get section mismatch warnings.
As an intermediate step, we intend to turn the linux/init.h cpuinit
content into no-ops as early as possible, since that will get rid
of these warnings. In any case, they are temporary and harmless.
This removes all the arch/x86 uses of the __cpuinit macros from
all C files. x86 only had the one __CPUINIT used in assembly files,
and it wasn't paired off with a .previous or a __FINIT, so we can
delete it directly w/o any corresponding additional change there.
[1] https://lkml.org/lkml/2013/5/20/589
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: x86@kernel.org
Acked-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: H. Peter Anvin <hpa@linux.intel.com>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
2013-06-18 15:23:59 -07:00
|
|
|
static const char *table_lookup_model(struct cpuinfo_x86 *c)
|
2008-09-04 20:09:13 -07:00
|
|
|
{
|
2013-10-21 01:35:20 -07:00
|
|
|
#ifdef CONFIG_X86_32
|
|
|
|
const struct legacy_cpu_model_info *info;
|
2008-09-04 20:09:13 -07:00
|
|
|
|
|
|
|
if (c->x86_model >= 16)
|
|
|
|
return NULL; /* Range check */
|
|
|
|
|
|
|
|
if (!this_cpu)
|
|
|
|
return NULL;
|
|
|
|
|
2013-10-21 01:35:20 -07:00
|
|
|
info = this_cpu->legacy_models;
|
2008-09-04 20:09:13 -07:00
|
|
|
|
2013-10-21 01:35:20 -07:00
|
|
|
while (info->family) {
|
2008-09-04 20:09:13 -07:00
|
|
|
if (info->family == c->x86)
|
|
|
|
return info->model_names[c->x86_model];
|
|
|
|
info++;
|
|
|
|
}
|
2013-10-21 01:35:20 -07:00
|
|
|
#endif
|
2008-09-04 20:09:13 -07:00
|
|
|
return NULL; /* Not found */
|
|
|
|
}
|
|
|
|
|
2019-09-16 15:39:56 -07:00
|
|
|
/* Aligned to unsigned long to avoid split lock in atomic bitmap ops */
__u32 cpu_caps_cleared[NCAPINTS + NBUGINTS] __aligned(sizeof(unsigned long));
__u32 cpu_caps_set[NCAPINTS + NBUGINTS] __aligned(sizeof(unsigned long));
|
2008-01-30 05:33:20 -07:00
|
|
|
|
2017-12-04 07:07:20 -07:00
|
|
|
#ifdef CONFIG_X86_32
|
|
|
|
/* The 32-bit entry code needs to find cpu_entry_area. */
|
|
|
|
DEFINE_PER_CPU(struct cpu_entry_area *, cpu_entry_area);
|
|
|
|
#endif
|
|
|
|
|
2017-03-14 10:05:08 -07:00
|
|
|
/* Load the original GDT from the per-cpu structure */
|
|
|
|
/* Load the original (writable) GDT for @cpu from the per-cpu structure. */
void load_direct_gdt(int cpu)
{
	struct desc_ptr gdt_descr;

	gdt_descr.address = (long)get_cpu_gdt_rw(cpu);
	gdt_descr.size = GDT_SIZE - 1;	/* limit = size in bytes - 1 */
	load_gdt(&gdt_descr);
}
EXPORT_SYMBOL_GPL(load_direct_gdt);
|
|
|
|
|
2017-03-14 10:05:07 -07:00
|
|
|
/* Load a fixmap remapping of the per-cpu GDT */
|
|
|
|
void load_fixmap_gdt(int cpu)
|
|
|
|
{
|
|
|
|
struct desc_ptr gdt_descr;
|
|
|
|
|
|
|
|
gdt_descr.address = (long)get_cpu_gdt_ro(cpu);
|
|
|
|
gdt_descr.size = GDT_SIZE - 1;
|
|
|
|
load_gdt(&gdt_descr);
|
|
|
|
}
|
2017-03-14 10:05:08 -07:00
|
|
|
EXPORT_SYMBOL_GPL(load_fixmap_gdt);
|
2017-03-14 10:05:07 -07:00
|
|
|
|
2022-09-15 04:10:41 -07:00
|
|
|
/**
|
2022-09-15 04:10:42 -07:00
|
|
|
* switch_gdt_and_percpu_base - Switch to direct GDT and runtime per CPU base
|
2022-09-15 04:10:41 -07:00
|
|
|
* @cpu: The CPU number for which this is invoked
|
|
|
|
*
|
2022-09-15 04:10:42 -07:00
|
|
|
* Invoked during early boot to switch from early GDT and early per CPU to
|
|
|
|
* the direct GDT and the runtime per CPU area. On 32-bit the percpu base
|
|
|
|
* switch is implicit by loading the direct GDT. On 64bit this requires
|
|
|
|
* to update GSBASE.
|
2009-03-14 00:46:17 -07:00
|
|
|
*/
|
2022-09-15 04:10:42 -07:00
|
|
|
void __init switch_gdt_and_percpu_base(int cpu)
|
2008-09-04 12:09:44 -07:00
|
|
|
{
|
2017-03-14 10:05:08 -07:00
|
|
|
load_direct_gdt(cpu);
|
2022-09-15 04:10:41 -07:00
|
|
|
|
|
|
|
#ifdef CONFIG_X86_64
|
|
|
|
/*
|
|
|
|
* No need to load %gs. It is already correct.
|
|
|
|
*
|
|
|
|
* Writing %gs on 64bit would zero GSBASE which would make any per
|
|
|
|
* CPU operation up to the point of the wrmsrl() fault.
|
|
|
|
*
|
|
|
|
* Set GSBASE to the new offset. Until the wrmsrl() happens the
|
|
|
|
* early mapping is still valid. That means the GSBASE update will
|
|
|
|
* lose any prior per CPU data which was not copied over in
|
|
|
|
* setup_per_cpu_areas().
|
2022-09-15 04:10:43 -07:00
|
|
|
*
|
|
|
|
* This works even with stackprotector enabled because the
|
|
|
|
* per CPU stack canary is 0 in both per CPU areas.
|
2022-09-15 04:10:41 -07:00
|
|
|
*/
|
|
|
|
wrmsrl(MSR_GS_BASE, cpu_kernelmode_gs_base(cpu));
|
|
|
|
#else
|
|
|
|
/*
|
|
|
|
* %fs is already set to __KERNEL_PERCPU, but after switching GDT
|
|
|
|
* it is required to load FS again so that the 'hidden' part is
|
|
|
|
* updated from the new GDT. Up to this point the early per CPU
|
|
|
|
* translation is active. Any content of the early per CPU data
|
|
|
|
* which was not copied over in setup_per_cpu_areas() is lost.
|
|
|
|
*/
|
|
|
|
loadsegment(fs, __KERNEL_PERCPU);
|
|
|
|
#endif
|
2008-09-04 12:09:44 -07:00
|
|
|
}
|
|
|
|
|
x86: delete __cpuinit usage from all x86 files
The __cpuinit type of throwaway sections might have made sense
some time ago when RAM was more constrained, but now the savings
do not offset the cost and complications. For example, the fix in
commit 5e427ec2d0 ("x86: Fix bit corruption at CPU resume time")
is a good example of the nasty type of bugs that can be created
with improper use of the various __init prefixes.
After a discussion on LKML[1] it was decided that cpuinit should go
the way of devinit and be phased out. Once all the users are gone,
we can then finally remove the macros themselves from linux/init.h.
Note that some harmless section mismatch warnings may result, since
notify_cpu_starting() and cpu_up() are arch independent (kernel/cpu.c)
are flagged as __cpuinit -- so if we remove the __cpuinit from
arch specific callers, we will also get section mismatch warnings.
As an intermediate step, we intend to turn the linux/init.h cpuinit
content into no-ops as early as possible, since that will get rid
of these warnings. In any case, they are temporary and harmless.
This removes all the arch/x86 uses of the __cpuinit macros from
all C files. x86 only had the one __CPUINIT used in assembly files,
and it wasn't paired off with a .previous or a __FINIT, so we can
delete it directly w/o any corresponding additional change there.
[1] https://lkml.org/lkml/2013/5/20/589
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: x86@kernel.org
Acked-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: H. Peter Anvin <hpa@linux.intel.com>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
2013-06-18 15:23:59 -07:00
|
|
|
/* Registered vendor drivers; get_cpu_vendor() scans this until the first NULL slot. */
static const struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};
|
2005-04-16 15:20:36 -07:00
|
|
|
|
x86: delete __cpuinit usage from all x86 files
The __cpuinit type of throwaway sections might have made sense
some time ago when RAM was more constrained, but now the savings
do not offset the cost and complications. For example, the fix in
commit 5e427ec2d0 ("x86: Fix bit corruption at CPU resume time")
is a good example of the nasty type of bugs that can be created
with improper use of the various __init prefixes.
After a discussion on LKML[1] it was decided that cpuinit should go
the way of devinit and be phased out. Once all the users are gone,
we can then finally remove the macros themselves from linux/init.h.
Note that some harmless section mismatch warnings may result, since
notify_cpu_starting() and cpu_up() are arch independent (kernel/cpu.c)
are flagged as __cpuinit -- so if we remove the __cpuinit from
arch specific callers, we will also get section mismatch warnings.
As an intermediate step, we intend to turn the linux/init.h cpuinit
content into no-ops as early as possible, since that will get rid
of these warnings. In any case, they are temporary and harmless.
This removes all the arch/x86 uses of the __cpuinit macros from
all C files. x86 only had the one __CPUINIT used in assembly files,
and it wasn't paired off with a .previous or a __FINIT, so we can
delete it directly w/o any corresponding additional change there.
[1] https://lkml.org/lkml/2013/5/20/589
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: x86@kernel.org
Acked-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: H. Peter Anvin <hpa@linux.intel.com>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
2013-06-18 15:23:59 -07:00
|
|
|
/*
 * Read the 48-byte processor brand string from extended CPUID leaves
 * 0x80000002..0x80000004 into c->x86_model_id, then trim leading and
 * trailing whitespace in place so /proc/cpuinfo consumers see a clean
 * "model name" string.
 */
static void get_model_name(struct cpuinfo_x86 *c)
{
	unsigned int *v;
	char *p, *q, *s;

	/* Brand-string leaves only exist when 0x80000004 is reported. */
	if (c->extended_cpuid_level < 0x80000004)
		return;

	/* Each leaf fills 16 bytes (4 dwords) of the 48-byte buffer. */
	v = (unsigned int *)c->x86_model_id;
	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
	/* The hardware string is not guaranteed to be NUL-terminated. */
	c->x86_model_id[48] = 0;

	/* Trim whitespace */
	p = q = s = &c->x86_model_id[0];

	/* Skip over leading spaces; p is the read cursor. */
	while (*p == ' ')
		p++;

	/*
	 * Copy the remainder down to q (the write cursor), remembering in
	 * s the destination of the last non-whitespace byte copied.
	 */
	while (*p) {
		/* Note the last non-whitespace index */
		if (!isspace(*p))
			s = q;

		*q++ = *p++;
	}

	/* Cut the string right after the last non-whitespace character. */
	*(s + 1) = '\0';
}
|
|
|
|
|
x86: delete __cpuinit usage from all x86 files
The __cpuinit type of throwaway sections might have made sense
some time ago when RAM was more constrained, but now the savings
do not offset the cost and complications. For example, the fix in
commit 5e427ec2d0 ("x86: Fix bit corruption at CPU resume time")
is a good example of the nasty type of bugs that can be created
with improper use of the various __init prefixes.
After a discussion on LKML[1] it was decided that cpuinit should go
the way of devinit and be phased out. Once all the users are gone,
we can then finally remove the macros themselves from linux/init.h.
Note that some harmless section mismatch warnings may result, since
notify_cpu_starting() and cpu_up() are arch independent (kernel/cpu.c)
are flagged as __cpuinit -- so if we remove the __cpuinit from
arch specific callers, we will also get section mismatch warnings.
As an intermediate step, we intend to turn the linux/init.h cpuinit
content into no-ops as early as possible, since that will get rid
of these warnings. In any case, they are temporary and harmless.
This removes all the arch/x86 uses of the __cpuinit macros from
all C files. x86 only had the one __CPUINIT used in assembly files,
and it wasn't paired off with a .previous or a __FINIT, so we can
delete it directly w/o any corresponding additional change there.
[1] https://lkml.org/lkml/2013/5/20/589
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: x86@kernel.org
Acked-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: H. Peter Anvin <hpa@linux.intel.com>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
2013-06-18 15:23:59 -07:00
|
|
|
/*
 * Determine L1/L2 cache sizes (and, on 64-bit, the TLB entry count)
 * from AMD-style extended CPUID leaves 0x80000005/0x80000006 and store
 * them in @c. On 32-bit, vendor legacy hooks and the "cachesize="
 * command-line override may adjust the reported L2 size.
 */
void cpu_detect_cache_sizes(struct cpuinfo_x86 *c)
{
	unsigned int n, dummy, ebx, ecx, edx, l2size;

	n = c->extended_cpuid_level;

	if (n >= 0x80000005) {
		/* Leaf 0x80000005: L1 data (ECX) and instruction (EDX) cache info. */
		cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
		/* Bits 31:24 of each register hold the cache size in KB. */
		c->x86_cache_size = (ecx>>24) + (edx>>24);
#ifdef CONFIG_X86_64
		/* On K8 L1 TLB is inclusive, so don't count it */
		c->x86_tlbsize = 0;
#endif
	}

	if (n < 0x80000006)	/* Some chips just has a large L1. */
		return;

	/* Leaf 0x80000006: L2 cache and L2 TLB information. */
	cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
	/* ECX[31:16] is the L2 cache size in KB. */
	l2size = ecx >> 16;

#ifdef CONFIG_X86_64
	/* EBX[27:16] and EBX[11:0] are the L2 d/iTLB 4K entry counts. */
	c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
#else
	/* do processor-specific cache resizing */
	if (this_cpu->legacy_cache_size)
		l2size = this_cpu->legacy_cache_size(c, l2size);

	/* Allow user to override all this if necessary. */
	if (cachesize_override != -1)
		l2size = cachesize_override;

	if (l2size == 0)
		return;		/* Again, no L2 cache is possible */
#endif

	c->x86_cache_size = l2size;
}
|
|
|
|
|
x86/tlb_info: get last level TLB entry number of CPU
For 4KB pages, x86 CPU has 2 or 1 level TLB, first level is data TLB and
instruction TLB, second level is shared TLB for both data and instructions.
For hupe page TLB, usually there is just one level and seperated by 2MB/4MB
and 1GB.
Although each levels TLB size is important for performance tuning, but for
genernal and rude optimizing, last level TLB entry number is suitable. And
in fact, last level TLB always has the biggest entry number.
This patch will get the biggest TLB entry number and use it in furture TLB
optimizing.
Accroding Borislav's suggestion, except tlb_ll[i/d]_* array, other
function and data will be released after system boot up.
For all kinds of x86 vendor friendly, vendor specific code was moved to its
specific files.
Signed-off-by: Alex Shi <alex.shi@intel.com>
Link: http://lkml.kernel.org/r/1340845344-27557-2-git-send-email-alex.shi@intel.com
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
2012-06-27 18:02:16 -07:00
|
|
|
/*
 * Last-level TLB entry counts per page size, filled in by the vendor
 * ->c_detect_tlb() callbacks and printed by cpu_detect_tlb().
 * tlb_lli_* = instruction TLB, tlb_lld_* = data TLB.
 */
u16 __read_mostly tlb_lli_4k[NR_INFO];
u16 __read_mostly tlb_lli_2m[NR_INFO];
u16 __read_mostly tlb_lli_4m[NR_INFO];
u16 __read_mostly tlb_lld_4k[NR_INFO];
u16 __read_mostly tlb_lld_2m[NR_INFO];
u16 __read_mostly tlb_lld_4m[NR_INFO];
u16 __read_mostly tlb_lld_1g[NR_INFO];
|
x86/tlb_info: get last level TLB entry number of CPU
For 4KB pages, x86 CPU has 2 or 1 level TLB, first level is data TLB and
instruction TLB, second level is shared TLB for both data and instructions.
For hupe page TLB, usually there is just one level and seperated by 2MB/4MB
and 1GB.
Although each levels TLB size is important for performance tuning, but for
genernal and rude optimizing, last level TLB entry number is suitable. And
in fact, last level TLB always has the biggest entry number.
This patch will get the biggest TLB entry number and use it in furture TLB
optimizing.
Accroding Borislav's suggestion, except tlb_ll[i/d]_* array, other
function and data will be released after system boot up.
For all kinds of x86 vendor friendly, vendor specific code was moved to its
specific files.
Signed-off-by: Alex Shi <alex.shi@intel.com>
Link: http://lkml.kernel.org/r/1340845344-27557-2-git-send-email-alex.shi@intel.com
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
2012-06-27 18:02:16 -07:00
|
|
|
|
x86, CPU: Fix trivial printk formatting issues with dmesg
dmesg (from util-linux) currently has two methods for reading the kernel
message ring buffer: /dev/kmsg and syslog(2). Since kernel 3.5.0 kmsg
has been the default, which escapes control characters (e.g. new lines)
before they are shown.
This change means that when dmesg is using /dev/kmsg, a 2 line printk
makes the output messy, because the second line does not get a
timestamp.
For example:
[ 0.012863] CPU0: Thermal monitoring enabled (TM1)
[ 0.012869] Last level iTLB entries: 4KB 1024, 2MB 1024, 4MB 1024
Last level dTLB entries: 4KB 1024, 2MB 1024, 4MB 1024, 1GB 4
[ 0.012958] Freeing SMP alternatives memory: 28K (ffffffff81d86000 - ffffffff81d8d000)
[ 0.014961] dmar: Host address width 39
Because printk.c intentionally escapes control characters, they should
not be there in the first place. This patch fixes two occurrences of
this.
Signed-off-by: Steven Honeyman <stevenhoneyman@gmail.com>
Link: https://lkml.kernel.org/r/1414856696-8094-1-git-send-email-stevenhoneyman@gmail.com
[ Boris: make cpu_detect_tlb() static, while at it. ]
Signed-off-by: Borislav Petkov <bp@suse.de>
2014-11-05 15:52:18 -07:00
|
|
|
/*
 * Run the vendor-specific TLB detection callback (which populates the
 * tlb_ll[id]_* tables above) and log the resulting last-level i/dTLB
 * geometry, one printk per line so /dev/kmsg readers keep timestamps.
 */
static void cpu_detect_tlb(struct cpuinfo_x86 *c)
{
	if (this_cpu->c_detect_tlb)
		this_cpu->c_detect_tlb(c);

	pr_info("Last level iTLB entries: 4KB %d, 2MB %d, 4MB %d\n",
		tlb_lli_4k[ENTRIES], tlb_lli_2m[ENTRIES],
		tlb_lli_4m[ENTRIES]);

	pr_info("Last level dTLB entries: 4KB %d, 2MB %d, 4MB %d, 1GB %d\n",
		tlb_lld_4k[ENTRIES], tlb_lld_2m[ENTRIES],
		tlb_lld_4m[ENTRIES], tlb_lld_1g[ENTRIES]);
}
|
|
|
|
|
x86: delete __cpuinit usage from all x86 files
The __cpuinit type of throwaway sections might have made sense
some time ago when RAM was more constrained, but now the savings
do not offset the cost and complications. For example, the fix in
commit 5e427ec2d0 ("x86: Fix bit corruption at CPU resume time")
is a good example of the nasty type of bugs that can be created
with improper use of the various __init prefixes.
After a discussion on LKML[1] it was decided that cpuinit should go
the way of devinit and be phased out. Once all the users are gone,
we can then finally remove the macros themselves from linux/init.h.
Note that some harmless section mismatch warnings may result, since
notify_cpu_starting() and cpu_up() are arch independent (kernel/cpu.c)
are flagged as __cpuinit -- so if we remove the __cpuinit from
arch specific callers, we will also get section mismatch warnings.
As an intermediate step, we intend to turn the linux/init.h cpuinit
content into no-ops as early as possible, since that will get rid
of these warnings. In any case, they are temporary and harmless.
This removes all the arch/x86 uses of the __cpuinit macros from
all C files. x86 only had the one __CPUINIT used in assembly files,
and it wasn't paired off with a .previous or a __FINIT, so we can
delete it directly w/o any corresponding additional change there.
[1] https://lkml.org/lkml/2013/5/20/589
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: x86@kernel.org
Acked-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: H. Peter Anvin <hpa@linux.intel.com>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
2013-06-18 15:23:59 -07:00
|
|
|
static void get_cpu_vendor(struct cpuinfo_x86 *c)
|
2005-04-16 15:20:36 -07:00
|
|
|
{
|
|
|
|
char *v = c->x86_vendor_id;
|
2009-03-14 00:46:17 -07:00
|
|
|
int i;
|
2005-04-16 15:20:36 -07:00
|
|
|
|
|
|
|
for (i = 0; i < X86_VENDOR_NUM; i++) {
|
2008-09-04 12:09:45 -07:00
|
|
|
if (!cpu_devs[i])
|
|
|
|
break;
|
|
|
|
|
|
|
|
if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
|
|
|
|
(cpu_devs[i]->c_ident[1] &&
|
|
|
|
!strcmp(v, cpu_devs[i]->c_ident[1]))) {
|
2009-03-14 00:46:17 -07:00
|
|
|
|
2008-09-04 12:09:45 -07:00
|
|
|
this_cpu = cpu_devs[i];
|
|
|
|
c->x86_vendor = this_cpu->c_x86_vendor;
|
|
|
|
return;
|
2005-04-16 15:20:36 -07:00
|
|
|
}
|
|
|
|
}
|
2008-09-04 12:09:45 -07:00
|
|
|
|
2016-02-01 20:45:02 -07:00
|
|
|
pr_err_once("CPU: vendor_id '%s' unknown, using generic init.\n" \
|
|
|
|
"CPU: Your system may be unstable.\n", v);
|
2008-09-04 12:09:45 -07:00
|
|
|
|
2006-02-05 00:28:03 -07:00
|
|
|
c->x86_vendor = X86_VENDOR_UNKNOWN;
|
|
|
|
this_cpu = &default_cpu;
|
2005-04-16 15:20:36 -07:00
|
|
|
}
|
|
|
|
|
x86: delete __cpuinit usage from all x86 files
The __cpuinit type of throwaway sections might have made sense
some time ago when RAM was more constrained, but now the savings
do not offset the cost and complications. For example, the fix in
commit 5e427ec2d0 ("x86: Fix bit corruption at CPU resume time")
is a good example of the nasty type of bugs that can be created
with improper use of the various __init prefixes.
After a discussion on LKML[1] it was decided that cpuinit should go
the way of devinit and be phased out. Once all the users are gone,
we can then finally remove the macros themselves from linux/init.h.
Note that some harmless section mismatch warnings may result, since
notify_cpu_starting() and cpu_up() are arch independent (kernel/cpu.c)
are flagged as __cpuinit -- so if we remove the __cpuinit from
arch specific callers, we will also get section mismatch warnings.
As an intermediate step, we intend to turn the linux/init.h cpuinit
content into no-ops as early as possible, since that will get rid
of these warnings. In any case, they are temporary and harmless.
This removes all the arch/x86 uses of the __cpuinit macros from
all C files. x86 only had the one __CPUINIT used in assembly files,
and it wasn't paired off with a .previous or a __FINIT, so we can
delete it directly w/o any corresponding additional change there.
[1] https://lkml.org/lkml/2013/5/20/589
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: x86@kernel.org
Acked-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: H. Peter Anvin <hpa@linux.intel.com>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
2013-06-18 15:23:59 -07:00
|
|
|
/*
 * Minimal CPU identification from CPUID leaves 0 and 1: vendor string,
 * cpuid level, family/model/stepping and CLFLUSH line size. Safe to
 * call very early in boot; defaults family to 4 for pre-CPUID-1 parts.
 */
void cpu_detect(struct cpuinfo_x86 *c)
{
	/* Get vendor name */
	/* CPUID.0 returns the 12-byte vendor string in EBX:EDX:ECX order,
	 * hence the 0/8/4 byte offsets below. */
	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
	      (unsigned int *)&c->x86_vendor_id[0],
	      (unsigned int *)&c->x86_vendor_id[8],
	      (unsigned int *)&c->x86_vendor_id[4]);

	c->x86 = 4;
	/* Intel-defined flags: level 0x00000001 */
	if (c->cpuid_level >= 0x00000001) {
		u32 junk, tfms, cap0, misc;

		cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
		c->x86 = x86_family(tfms);
		c->x86_model = x86_model(tfms);
		c->x86_stepping = x86_stepping(tfms);

		/* EDX bit 19 == CLFLUSH supported; EBX[15:8] gives the
		 * flush line size in 8-byte units. */
		if (cap0 & (1<<19)) {
			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
			c->x86_cache_alignment = c->x86_clflush_size;
		}
	}
}
|
2008-09-04 12:09:44 -07:00
|
|
|
|
2017-01-18 12:15:38 -07:00
|
|
|
static void apply_forced_caps(struct cpuinfo_x86 *c)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
2017-12-04 07:07:32 -07:00
|
|
|
for (i = 0; i < NCAPINTS + NBUGINTS; i++) {
|
2017-01-18 12:15:38 -07:00
|
|
|
c->x86_capability[i] &= ~cpu_caps_cleared[i];
|
|
|
|
c->x86_capability[i] |= cpu_caps_set[i];
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-01-30 07:30:23 -07:00
|
|
|
/*
 * Translate vendor-specific speculation-control CPUID bits into the
 * common synthetic features (IBRS/IBPB/STIBP/SSBD/MSR_SPEC_CTRL) that
 * the mitigation code keys off, so later code need not care whether
 * the capability came from the Intel or the AMD enumeration.
 */
static void init_speculation_control(struct cpuinfo_x86 *c)
{
	/*
	 * The Intel SPEC_CTRL CPUID bit implies IBRS and IBPB support,
	 * and they also have a different bit for STIBP support. Also,
	 * a hypervisor might have set the individual AMD bits even on
	 * Intel CPUs, for finer-grained selection of what's available.
	 */
	if (cpu_has(c, X86_FEATURE_SPEC_CTRL)) {
		set_cpu_cap(c, X86_FEATURE_IBRS);
		set_cpu_cap(c, X86_FEATURE_IBPB);
		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
	}

	if (cpu_has(c, X86_FEATURE_INTEL_STIBP))
		set_cpu_cap(c, X86_FEATURE_STIBP);

	/* SSBD may be exposed via SPEC_CTRL or via the virtualized bit. */
	if (cpu_has(c, X86_FEATURE_SPEC_CTRL_SSBD) ||
	    cpu_has(c, X86_FEATURE_VIRT_SSBD))
		set_cpu_cap(c, X86_FEATURE_SSBD);

	/* AMD enumerations: each implies the SPEC_CTRL MSR is present. */
	if (cpu_has(c, X86_FEATURE_AMD_IBRS)) {
		set_cpu_cap(c, X86_FEATURE_IBRS);
		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
	}

	if (cpu_has(c, X86_FEATURE_AMD_IBPB))
		set_cpu_cap(c, X86_FEATURE_IBPB);

	if (cpu_has(c, X86_FEATURE_AMD_STIBP)) {
		set_cpu_cap(c, X86_FEATURE_STIBP);
		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
	}

	/* Native AMD SSBD supersedes the virtualized VIRT_SSBD interface. */
	if (cpu_has(c, X86_FEATURE_AMD_SSBD)) {
		set_cpu_cap(c, X86_FEATURE_SSBD);
		set_cpu_cap(c, X86_FEATURE_MSR_SPEC_CTRL);
		clear_cpu_cap(c, X86_FEATURE_VIRT_SSBD);
	}
}
|
|
|
|
|
x86: delete __cpuinit usage from all x86 files
The __cpuinit type of throwaway sections might have made sense
some time ago when RAM was more constrained, but now the savings
do not offset the cost and complications. For example, the fix in
commit 5e427ec2d0 ("x86: Fix bit corruption at CPU resume time")
is a good example of the nasty type of bugs that can be created
with improper use of the various __init prefixes.
After a discussion on LKML[1] it was decided that cpuinit should go
the way of devinit and be phased out. Once all the users are gone,
we can then finally remove the macros themselves from linux/init.h.
Note that some harmless section mismatch warnings may result, since
notify_cpu_starting() and cpu_up() are arch independent (kernel/cpu.c)
are flagged as __cpuinit -- so if we remove the __cpuinit from
arch specific callers, we will also get section mismatch warnings.
As an intermediate step, we intend to turn the linux/init.h cpuinit
content into no-ops as early as possible, since that will get rid
of these warnings. In any case, they are temporary and harmless.
This removes all the arch/x86 uses of the __cpuinit macros from
all C files. x86 only had the one __CPUINIT used in assembly files,
and it wasn't paired off with a .previous or a __FINIT, so we can
delete it directly w/o any corresponding additional change there.
[1] https://lkml.org/lkml/2013/5/20/589
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: x86@kernel.org
Acked-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: H. Peter Anvin <hpa@linux.intel.com>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
2013-06-18 15:23:59 -07:00
|
|
|
/*
 * Populate c->x86_capability[] from the CPUID feature leaves.
 *
 * Reads the basic leaves (0x1, 0x6, 0x7 sub-leaves 0/1, 0xd sub-leaf 1)
 * and whichever extended 0x8000xxxx leaves this CPU advertises, then
 * folds in the scattered/speculation-control synthetic bits and finally
 * re-applies any command-line forced capability overrides.
 */
void get_cpu_cap(struct cpuinfo_x86 *c)
{
	u32 eax, ebx, ecx, edx;

	/* Intel-defined flags: level 0x00000001 */
	if (c->cpuid_level >= 0x00000001) {
		cpuid(0x00000001, &eax, &ebx, &ecx, &edx);

		/* Only ECX/EDX carry feature bits in leaf 0x1. */
		c->x86_capability[CPUID_1_ECX] = ecx;
		c->x86_capability[CPUID_1_EDX] = edx;
	}

	/* Thermal and Power Management Leaf: level 0x00000006 (eax) */
	if (c->cpuid_level >= 0x00000006)
		c->x86_capability[CPUID_6_EAX] = cpuid_eax(0x00000006);

	/* Additional Intel-defined flags: level 0x00000007 */
	if (c->cpuid_level >= 0x00000007) {
		cpuid_count(0x00000007, 0, &eax, &ebx, &ecx, &edx);
		c->x86_capability[CPUID_7_0_EBX] = ebx;
		c->x86_capability[CPUID_7_ECX] = ecx;
		c->x86_capability[CPUID_7_EDX] = edx;

		/*
		 * Check valid sub-leaf index before accessing it:
		 * sub-leaf 0's EAX reports the max supported sub-leaf.
		 */
		if (eax >= 1) {
			cpuid_count(0x00000007, 1, &eax, &ebx, &ecx, &edx);
			c->x86_capability[CPUID_7_1_EAX] = eax;
		}
	}

	/* Extended state features: level 0x0000000d */
	if (c->cpuid_level >= 0x0000000d) {
		cpuid_count(0x0000000d, 1, &eax, &ebx, &ecx, &edx);

		c->x86_capability[CPUID_D_1_EAX] = eax;
	}

	/* AMD-defined flags: level 0x80000001 */
	eax = cpuid_eax(0x80000000);
	/* Leaf 0x80000000's EAX is the highest supported extended leaf. */
	c->extended_cpuid_level = eax;

	/* A sane extended range must report 0x8000xxxx here. */
	if ((eax & 0xffff0000) == 0x80000000) {
		if (eax >= 0x80000001) {
			cpuid(0x80000001, &eax, &ebx, &ecx, &edx);

			c->x86_capability[CPUID_8000_0001_ECX] = ecx;
			c->x86_capability[CPUID_8000_0001_EDX] = edx;
		}
	}

	if (c->extended_cpuid_level >= 0x80000007) {
		cpuid(0x80000007, &eax, &ebx, &ecx, &edx);

		c->x86_capability[CPUID_8000_0007_EBX] = ebx;
		/* Advanced power management info lives in EDX. */
		c->x86_power = edx;
	}

	if (c->extended_cpuid_level >= 0x80000008) {
		cpuid(0x80000008, &eax, &ebx, &ecx, &edx);
		c->x86_capability[CPUID_8000_0008_EBX] = ebx;
	}

	if (c->extended_cpuid_level >= 0x8000000a)
		c->x86_capability[CPUID_8000_000A_EDX] = cpuid_edx(0x8000000a);

	if (c->extended_cpuid_level >= 0x8000001f)
		c->x86_capability[CPUID_8000_001F_EAX] = cpuid_eax(0x8000001f);

	if (c->extended_cpuid_level >= 0x80000021)
		c->x86_capability[CPUID_8000_0021_EAX] = cpuid_eax(0x80000021);

	/* Synthesize feature bits that don't map 1:1 onto a CPUID word. */
	init_scattered_cpuid_features(c);
	init_speculation_control(c);

	/*
	 * Clear/Set all flags overridden by options, after probe.
	 * This needs to happen each time we re-probe, which may happen
	 * several times during CPU initialization.
	 */
	apply_forced_caps(c);
}
|
2005-04-16 15:20:36 -07:00
|
|
|
|
2018-07-24 05:45:47 -07:00
|
|
|
/*
 * Determine the CPU's physical/virtual address widths and CLFLUSH line
 * size.
 *
 * When CPUID leaf 0x80000008 is available it is authoritative for the
 * address widths; otherwise conservative per-architecture defaults are
 * used. x86_cache_bits and x86_cache_alignment are derived from the
 * results either way.
 */
void get_cpu_address_sizes(struct cpuinfo_x86 *c)
{
	u32 eax, ebx, ecx, edx;

	if (!cpu_has(c, X86_FEATURE_CPUID) ||
	    (c->extended_cpuid_level < 0x80000008)) {
		/* No enumeration - fall back to safe defaults. */
		if (IS_ENABLED(CONFIG_X86_64)) {
			c->x86_clflush_size = 64;
			c->x86_phys_bits = 36;
			c->x86_virt_bits = 48;
		} else {
			c->x86_clflush_size = 32;
			c->x86_virt_bits = 32;
			c->x86_phys_bits = 32;

			/* PAE/PSE36 extend 32-bit physical addressing to 36 bits. */
			if (cpu_has(c, X86_FEATURE_PAE) ||
			    cpu_has(c, X86_FEATURE_PSE36))
				c->x86_phys_bits = 36;
		}
	} else {
		cpuid(0x80000008, &eax, &ebx, &ecx, &edx);

		/* EAX[15:8] = virtual width, EAX[7:0] = physical width. */
		c->x86_virt_bits = (eax >> 8) & 0xff;
		c->x86_phys_bits = eax & 0xff;

		/*
		 * Provide a sane default if not enumerated: some CPUs have
		 * leaf 0x80000008 but never set CPUID.01H:EDX[CLFSH], so
		 * x86_clflush_size may still be zero here and a zero
		 * cache_line_size() would break allocation-size math.
		 */
		if (!c->x86_clflush_size)
			c->x86_clflush_size = 32;
	}

	c->x86_cache_bits = c->x86_phys_bits;
	c->x86_cache_alignment = c->x86_clflush_size;
}
|
|
|
|
|
x86: delete __cpuinit usage from all x86 files
The __cpuinit type of throwaway sections might have made sense
some time ago when RAM was more constrained, but now the savings
do not offset the cost and complications. For example, the fix in
commit 5e427ec2d0 ("x86: Fix bit corruption at CPU resume time")
is a good example of the nasty type of bugs that can be created
with improper use of the various __init prefixes.
After a discussion on LKML[1] it was decided that cpuinit should go
the way of devinit and be phased out. Once all the users are gone,
we can then finally remove the macros themselves from linux/init.h.
Note that some harmless section mismatch warnings may result, since
notify_cpu_starting() and cpu_up() are arch independent (kernel/cpu.c)
are flagged as __cpuinit -- so if we remove the __cpuinit from
arch specific callers, we will also get section mismatch warnings.
As an intermediate step, we intend to turn the linux/init.h cpuinit
content into no-ops as early as possible, since that will get rid
of these warnings. In any case, they are temporary and harmless.
This removes all the arch/x86 uses of the __cpuinit macros from
all C files. x86 only had the one __CPUINIT used in assembly files,
and it wasn't paired off with a .previous or a __FINIT, so we can
delete it directly w/o any corresponding additional change there.
[1] https://lkml.org/lkml/2013/5/20/589
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: x86@kernel.org
Acked-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: H. Peter Anvin <hpa@linux.intel.com>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
2013-06-18 15:23:59 -07:00
|
|
|
/*
 * Classify a CPU that has no CPUID instruction (32-bit only).
 *
 * Distinguishes 386 vs. 486 via the EFLAGS.AC toggle test, then lets
 * each registered vendor driver attempt its legacy identification hook
 * until one fills in a vendor ID string.
 */
static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	int idx;

	/*
	 * First of all, decide if this is a 486 or higher:
	 * only a 486 (or better) can toggle the AC flag.
	 */
	c->x86 = flag_is_changeable_p(X86_EFLAGS_AC) ? 4 : 3;

	for (idx = 0; idx < X86_VENDOR_NUM; idx++) {
		if (!cpu_devs[idx] || !cpu_devs[idx]->c_identify)
			continue;

		/* Clear the ID so we can tell whether the hook filled it in. */
		c->x86_vendor_id[0] = 0;
		cpu_devs[idx]->c_identify(c);
		if (c->x86_vendor_id[0]) {
			get_cpu_vendor(c);
			break;
		}
	}
#endif
}
|
|
|
|
|
2019-11-04 04:22:01 -07:00
|
|
|
/*
 * Flags for cpu_vuln_whitelist[].driver_data. Each bit declares that the
 * matched CPU model is NOT affected by the corresponding issue (except
 * MSBDS_ONLY, which narrows MDS to its MSBDS variant). Consumed via
 * cpu_matches(cpu_vuln_whitelist, ...) in cpu_set_bug_bits().
 */
#define NO_SPECULATION		BIT(0)
#define NO_MELTDOWN		BIT(1)
#define NO_SSB			BIT(2)
#define NO_L1TF			BIT(3)
#define NO_MDS			BIT(4)
#define MSBDS_ONLY		BIT(5)
#define NO_SWAPGS		BIT(6)
#define NO_ITLB_MULTIHIT	BIT(7)
#define NO_SPECTRE_V2		BIT(8)
#define NO_MMIO			BIT(9)
#define NO_EIBRS_PBRSB		BIT(10)
#define NO_BHI			BIT(11)

/* Match by vendor/family/model triple. */
#define VULNWL(vendor, family, model, whitelist)	\
	X86_MATCH_VENDOR_FAM_MODEL(vendor, family, model, whitelist)

/* Match an Intel vendor/family/model (VFM) constant. */
#define VULNWL_INTEL(vfm, whitelist)		\
	X86_MATCH_VFM(vfm, whitelist)

#define VULNWL_AMD(family, whitelist)		\
	VULNWL(AMD, family, X86_MODEL_ANY, whitelist)

#define VULNWL_HYGON(family, whitelist)		\
	VULNWL(HYGON, family, X86_MODEL_ANY, whitelist)

/* CPU models known to be immune to (some) speculation issues. */
static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
	VULNWL(ANY,	4, X86_MODEL_ANY,	NO_SPECULATION),
	VULNWL(CENTAUR,	5, X86_MODEL_ANY,	NO_SPECULATION),
	VULNWL(INTEL,	5, X86_MODEL_ANY,	NO_SPECULATION),
	VULNWL(NSC,	5, X86_MODEL_ANY,	NO_SPECULATION),
	VULNWL(VORTEX,	5, X86_MODEL_ANY,	NO_SPECULATION),
	VULNWL(VORTEX,	6, X86_MODEL_ANY,	NO_SPECULATION),

	/* Intel Family 6 */
	VULNWL_INTEL(INTEL_TIGERLAKE,		NO_MMIO),
	VULNWL_INTEL(INTEL_TIGERLAKE_L,		NO_MMIO),
	VULNWL_INTEL(INTEL_ALDERLAKE,		NO_MMIO),
	VULNWL_INTEL(INTEL_ALDERLAKE_L,		NO_MMIO),

	VULNWL_INTEL(INTEL_ATOM_SALTWELL,	NO_SPECULATION | NO_ITLB_MULTIHIT),
	VULNWL_INTEL(INTEL_ATOM_SALTWELL_TABLET, NO_SPECULATION | NO_ITLB_MULTIHIT),
	VULNWL_INTEL(INTEL_ATOM_SALTWELL_MID,	NO_SPECULATION | NO_ITLB_MULTIHIT),
	VULNWL_INTEL(INTEL_ATOM_BONNELL,	NO_SPECULATION | NO_ITLB_MULTIHIT),
	VULNWL_INTEL(INTEL_ATOM_BONNELL_MID,	NO_SPECULATION | NO_ITLB_MULTIHIT),

	VULNWL_INTEL(INTEL_ATOM_SILVERMONT,	NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
	VULNWL_INTEL(INTEL_ATOM_SILVERMONT_D,	NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
	VULNWL_INTEL(INTEL_ATOM_SILVERMONT_MID,	NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
	VULNWL_INTEL(INTEL_ATOM_AIRMONT,	NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
	VULNWL_INTEL(INTEL_XEON_PHI_KNL,	NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
	VULNWL_INTEL(INTEL_XEON_PHI_KNM,	NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),

	VULNWL_INTEL(INTEL_CORE_YONAH,		NO_SSB),

	VULNWL_INTEL(INTEL_ATOM_AIRMONT_MID,	NO_SSB | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | MSBDS_ONLY),
	VULNWL_INTEL(INTEL_ATOM_AIRMONT_NP,	NO_SSB | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),

	VULNWL_INTEL(INTEL_ATOM_GOLDMONT,	NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
	VULNWL_INTEL(INTEL_ATOM_GOLDMONT_D,	NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
	VULNWL_INTEL(INTEL_ATOM_GOLDMONT_PLUS,	NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_EIBRS_PBRSB),

	/*
	 * Technically, swapgs isn't serializing on AMD (despite it previously
	 * being documented as such in the APM). But according to AMD, %gs is
	 * updated non-speculatively, and the issuing of %gs-relative memory
	 * operands will be blocked until the %gs update completes, which is
	 * good enough for our purposes.
	 */

	VULNWL_INTEL(INTEL_ATOM_TREMONT,	NO_EIBRS_PBRSB),
	VULNWL_INTEL(INTEL_ATOM_TREMONT_L,	NO_EIBRS_PBRSB),
	VULNWL_INTEL(INTEL_ATOM_TREMONT_D,	NO_ITLB_MULTIHIT | NO_EIBRS_PBRSB),

	/* AMD Family 0xf - 0x12 */
	VULNWL_AMD(0x0f,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_BHI),
	VULNWL_AMD(0x10,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_BHI),
	VULNWL_AMD(0x11,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_BHI),
	VULNWL_AMD(0x12,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_BHI),

	/* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */
	VULNWL_AMD(X86_FAMILY_ANY,	NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_EIBRS_PBRSB | NO_BHI),
	VULNWL_HYGON(X86_FAMILY_ANY,	NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_EIBRS_PBRSB | NO_BHI),

	/* Zhaoxin Family 7 */
	VULNWL(CENTAUR,	7, X86_MODEL_ANY,	NO_SPECTRE_V2 | NO_SWAPGS | NO_MMIO | NO_BHI),
	VULNWL(ZHAOXIN,	7, X86_MODEL_ANY,	NO_SPECTRE_V2 | NO_SWAPGS | NO_MMIO | NO_BHI),
	{}
};
|
|
|
|
|
2022-06-14 14:15:49 -07:00
|
|
|
/*
 * Flags for cpu_vuln_blacklist[].driver_data. Each bit declares that the
 * matched CPU model IS affected by the corresponding issue. Consumed via
 * cpu_matches(cpu_vuln_blacklist, ...) in cpu_set_bug_bits() and
 * vulnerable_to_rfds().
 */
#define VULNBL(vendor, family, model, blacklist)	\
	X86_MATCH_VENDOR_FAM_MODEL(vendor, family, model, blacklist)

/* Match an Intel VFM constant, optionally restricted to a stepping range. */
#define VULNBL_INTEL_STEPPINGS(vfm, steppings, issues)	\
	X86_MATCH_VFM_STEPPINGS(vfm, steppings, issues)

#define VULNBL_AMD(family, blacklist)		\
	VULNBL(AMD, family, X86_MODEL_ANY, blacklist)

#define VULNBL_HYGON(family, blacklist)		\
	VULNBL(HYGON, family, X86_MODEL_ANY, blacklist)

/* CPU is affected by Special Register Buffer Data Sampling */
#define SRBDS		BIT(0)
/* CPU is affected by X86_BUG_MMIO_STALE_DATA */
#define MMIO		BIT(1)
/* CPU is affected by Shared Buffers Data Sampling (SBDS), a variant of X86_BUG_MMIO_STALE_DATA */
#define MMIO_SBDS	BIT(2)
/* CPU is affected by RETbleed, speculating where you would not expect it */
#define RETBLEED	BIT(3)
/* CPU is affected by SMT (cross-thread) return predictions */
#define SMT_RSB		BIT(4)
/* CPU is affected by SRSO */
#define SRSO		BIT(5)
/* CPU is affected by GDS */
#define GDS		BIT(6)
/* CPU is affected by Register File Data Sampling */
#define RFDS		BIT(7)

/* CPU models known to be affected by the issues flagged above. */
static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
	VULNBL_INTEL_STEPPINGS(INTEL_IVYBRIDGE,		X86_STEPPING_ANY,	SRBDS),
	VULNBL_INTEL_STEPPINGS(INTEL_HASWELL,		X86_STEPPING_ANY,	SRBDS),
	VULNBL_INTEL_STEPPINGS(INTEL_HASWELL_L,		X86_STEPPING_ANY,	SRBDS),
	VULNBL_INTEL_STEPPINGS(INTEL_HASWELL_G,		X86_STEPPING_ANY,	SRBDS),
	VULNBL_INTEL_STEPPINGS(INTEL_HASWELL_X,		X86_STEPPING_ANY,	MMIO),
	VULNBL_INTEL_STEPPINGS(INTEL_BROADWELL_D,	X86_STEPPING_ANY,	MMIO),
	VULNBL_INTEL_STEPPINGS(INTEL_BROADWELL_G,	X86_STEPPING_ANY,	SRBDS),
	VULNBL_INTEL_STEPPINGS(INTEL_BROADWELL_X,	X86_STEPPING_ANY,	MMIO),
	VULNBL_INTEL_STEPPINGS(INTEL_BROADWELL,		X86_STEPPING_ANY,	SRBDS),
	VULNBL_INTEL_STEPPINGS(INTEL_SKYLAKE_X,		X86_STEPPING_ANY,	MMIO | RETBLEED | GDS),
	VULNBL_INTEL_STEPPINGS(INTEL_SKYLAKE_L,		X86_STEPPING_ANY,	MMIO | RETBLEED | GDS | SRBDS),
	VULNBL_INTEL_STEPPINGS(INTEL_SKYLAKE,		X86_STEPPING_ANY,	MMIO | RETBLEED | GDS | SRBDS),
	VULNBL_INTEL_STEPPINGS(INTEL_KABYLAKE_L,	X86_STEPPING_ANY,	MMIO | RETBLEED | GDS | SRBDS),
	VULNBL_INTEL_STEPPINGS(INTEL_KABYLAKE,		X86_STEPPING_ANY,	MMIO | RETBLEED | GDS | SRBDS),
	VULNBL_INTEL_STEPPINGS(INTEL_CANNONLAKE_L,	X86_STEPPING_ANY,	RETBLEED),
	VULNBL_INTEL_STEPPINGS(INTEL_ICELAKE_L,		X86_STEPPING_ANY,	MMIO | MMIO_SBDS | RETBLEED | GDS),
	VULNBL_INTEL_STEPPINGS(INTEL_ICELAKE_D,		X86_STEPPING_ANY,	MMIO | GDS),
	VULNBL_INTEL_STEPPINGS(INTEL_ICELAKE_X,		X86_STEPPING_ANY,	MMIO | GDS),
	VULNBL_INTEL_STEPPINGS(INTEL_COMETLAKE,		X86_STEPPING_ANY,	MMIO | MMIO_SBDS | RETBLEED | GDS),
	/* Stepping 0x0 of Comet Lake L is a narrower match; listed first. */
	VULNBL_INTEL_STEPPINGS(INTEL_COMETLAKE_L,	X86_STEPPINGS(0x0, 0x0),	MMIO | RETBLEED),
	VULNBL_INTEL_STEPPINGS(INTEL_COMETLAKE_L,	X86_STEPPING_ANY,	MMIO | MMIO_SBDS | RETBLEED | GDS),
	VULNBL_INTEL_STEPPINGS(INTEL_TIGERLAKE_L,	X86_STEPPING_ANY,	GDS),
	VULNBL_INTEL_STEPPINGS(INTEL_TIGERLAKE,		X86_STEPPING_ANY,	GDS),
	VULNBL_INTEL_STEPPINGS(INTEL_LAKEFIELD,		X86_STEPPING_ANY,	MMIO | MMIO_SBDS | RETBLEED),
	VULNBL_INTEL_STEPPINGS(INTEL_ROCKETLAKE,	X86_STEPPING_ANY,	MMIO | RETBLEED | GDS),
	VULNBL_INTEL_STEPPINGS(INTEL_ALDERLAKE,		X86_STEPPING_ANY,	RFDS),
	VULNBL_INTEL_STEPPINGS(INTEL_ALDERLAKE_L,	X86_STEPPING_ANY,	RFDS),
	VULNBL_INTEL_STEPPINGS(INTEL_RAPTORLAKE,	X86_STEPPING_ANY,	RFDS),
	VULNBL_INTEL_STEPPINGS(INTEL_RAPTORLAKE_P,	X86_STEPPING_ANY,	RFDS),
	VULNBL_INTEL_STEPPINGS(INTEL_RAPTORLAKE_S,	X86_STEPPING_ANY,	RFDS),
	VULNBL_INTEL_STEPPINGS(INTEL_ATOM_GRACEMONT,	X86_STEPPING_ANY,	RFDS),
	VULNBL_INTEL_STEPPINGS(INTEL_ATOM_TREMONT,	X86_STEPPING_ANY,	MMIO | MMIO_SBDS | RFDS),
	VULNBL_INTEL_STEPPINGS(INTEL_ATOM_TREMONT_D,	X86_STEPPING_ANY,	MMIO | RFDS),
	VULNBL_INTEL_STEPPINGS(INTEL_ATOM_TREMONT_L,	X86_STEPPING_ANY,	MMIO | MMIO_SBDS | RFDS),
	VULNBL_INTEL_STEPPINGS(INTEL_ATOM_GOLDMONT,	X86_STEPPING_ANY,	RFDS),
	VULNBL_INTEL_STEPPINGS(INTEL_ATOM_GOLDMONT_D,	X86_STEPPING_ANY,	RFDS),
	VULNBL_INTEL_STEPPINGS(INTEL_ATOM_GOLDMONT_PLUS, X86_STEPPING_ANY,	RFDS),

	VULNBL_AMD(0x15, RETBLEED),
	VULNBL_AMD(0x16, RETBLEED),
	VULNBL_AMD(0x17, RETBLEED | SMT_RSB | SRSO),
	VULNBL_HYGON(0x18, RETBLEED | SMT_RSB | SRSO),
	VULNBL_AMD(0x19, SRSO),
	{}
};
|
|
|
|
|
2020-04-16 08:32:42 -07:00
|
|
|
/*
 * Return true when the running CPU appears in @table with at least one
 * of the @which bits set in that entry's driver_data.
 */
static bool __init cpu_matches(const struct x86_cpu_id *table, unsigned long which)
{
	const struct x86_cpu_id *match;

	match = x86_match_cpu(table);
	if (!match)
		return false;

	return (match->driver_data & which) != 0;
}
|
2018-06-13 15:48:26 -07:00
|
|
|
|
2019-10-23 01:52:35 -07:00
|
|
|
u64 x86_read_arch_cap_msr(void)
|
2018-01-25 09:14:13 -07:00
|
|
|
{
|
2024-04-11 00:25:36 -07:00
|
|
|
u64 x86_arch_cap_msr = 0;
|
2018-01-25 09:14:13 -07:00
|
|
|
|
2019-10-23 01:52:35 -07:00
|
|
|
if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
|
2024-04-11 00:25:36 -07:00
|
|
|
rdmsrl(MSR_IA32_ARCH_CAPABILITIES, x86_arch_cap_msr);
|
2019-10-23 01:52:35 -07:00
|
|
|
|
2024-04-11 00:25:36 -07:00
|
|
|
return x86_arch_cap_msr;
|
2019-10-23 01:52:35 -07:00
|
|
|
}
|
|
|
|
|
2024-04-11 00:25:36 -07:00
|
|
|
/*
 * True when ARCH_CAPABILITIES declares immunity to all three MMIO Stale
 * Data variants (fill buffer, primary stale data, shared buffer).
 */
static bool arch_cap_mmio_immune(u64 x86_arch_cap_msr)
{
	const u64 mmio_immune_bits = ARCH_CAP_FBSDP_NO | ARCH_CAP_PSDP_NO |
				     ARCH_CAP_SBDR_SSDP_NO;

	return (x86_arch_cap_msr & mmio_immune_bits) == mmio_immune_bits;
}
|
|
|
|
|
2024-04-11 00:25:36 -07:00
|
|
|
/*
 * Decide whether this CPU is vulnerable to Register File Data Sampling,
 * preferring the MSR enumeration over the static model blacklist.
 */
static bool __init vulnerable_to_rfds(u64 x86_arch_cap_msr)
{
	bool vulnerable;

	if (x86_arch_cap_msr & ARCH_CAP_RFDS_NO) {
		/* The explicit "immunity" enumeration trumps everything else. */
		vulnerable = false;
	} else if (x86_arch_cap_msr & ARCH_CAP_RFDS_CLEAR) {
		/*
		 * VMMs set ARCH_CAP_RFDS_CLEAR for processors not in the
		 * blacklist to indicate that mitigation is needed because the
		 * guest is running on vulnerable hardware or may migrate to
		 * such hardware.
		 */
		vulnerable = true;
	} else {
		/* No enumeration either way - consult the blacklist. */
		vulnerable = cpu_matches(cpu_vuln_blacklist, RFDS);
	}

	return vulnerable;
}
|
|
|
|
|
2019-10-23 01:52:35 -07:00
|
|
|
/*
 * Set the X86_BUG_* flags describing which speculative-execution
 * vulnerabilities affect this CPU.
 *
 * Each decision combines up to three sources, in order of authority:
 *   1. "not affected" bits enumerated in IA32_ARCH_CAPABILITIES,
 *   2. cpu_vuln_whitelist - CPU models known to be unaffected,
 *   3. cpu_vuln_blacklist - CPU models known to be affected.
 *
 * NOTE: ordering matters - the early returns below deliberately skip the
 * remaining checks for whitelisted CPUs.
 */
static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
{
	u64 x86_arch_cap_msr = x86_read_arch_cap_msr();

	/* Set ITLB_MULTIHIT bug if cpu is not in the whitelist and not mitigated */
	if (!cpu_matches(cpu_vuln_whitelist, NO_ITLB_MULTIHIT) &&
	    !(x86_arch_cap_msr & ARCH_CAP_PSCHANGE_MC_NO))
		setup_force_cpu_bug(X86_BUG_ITLB_MULTIHIT);

	/* CPUs that don't speculate at all get none of the bugs below. */
	if (cpu_matches(cpu_vuln_whitelist, NO_SPECULATION))
		return;

	/* Every speculating CPU is assumed Spectre v1 vulnerable. */
	setup_force_cpu_bug(X86_BUG_SPECTRE_V1);

	if (!cpu_matches(cpu_vuln_whitelist, NO_SPECTRE_V2))
		setup_force_cpu_bug(X86_BUG_SPECTRE_V2);

	if (!cpu_matches(cpu_vuln_whitelist, NO_SSB) &&
	    !(x86_arch_cap_msr & ARCH_CAP_SSB_NO) &&
	   !cpu_has(c, X86_FEATURE_AMD_SSB_NO))
		setup_force_cpu_bug(X86_BUG_SPEC_STORE_BYPASS);

	/*
	 * AMD's AutoIBRS is equivalent to Intel's eIBRS - use the Intel feature
	 * flag and protect from vendor-specific bugs via the whitelist.
	 *
	 * Don't use AutoIBRS when SNP is enabled because it degrades host
	 * userspace indirect branch performance.
	 */
	if ((x86_arch_cap_msr & ARCH_CAP_IBRS_ALL) ||
	    (cpu_has(c, X86_FEATURE_AUTOIBRS) &&
	     !cpu_feature_enabled(X86_FEATURE_SEV_SNP))) {
		setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED);
		if (!cpu_matches(cpu_vuln_whitelist, NO_EIBRS_PBRSB) &&
		    !(x86_arch_cap_msr & ARCH_CAP_PBRSB_NO))
			setup_force_cpu_bug(X86_BUG_EIBRS_PBRSB);
	}

	if (!cpu_matches(cpu_vuln_whitelist, NO_MDS) &&
	    !(x86_arch_cap_msr & ARCH_CAP_MDS_NO)) {
		setup_force_cpu_bug(X86_BUG_MDS);
		/* Some parts are only affected by the MSBDS variant of MDS. */
		if (cpu_matches(cpu_vuln_whitelist, MSBDS_ONLY))
			setup_force_cpu_bug(X86_BUG_MSBDS_ONLY);
	}

	if (!cpu_matches(cpu_vuln_whitelist, NO_SWAPGS))
		setup_force_cpu_bug(X86_BUG_SWAPGS);

	/*
	 * When the CPU is not mitigated for TAA (TAA_NO=0) set TAA bug when:
	 *	- TSX is supported or
	 *	- TSX_CTRL is present
	 *
	 * TSX_CTRL check is needed for cases when TSX could be disabled before
	 * the kernel boot e.g. kexec.
	 * TSX_CTRL check alone is not sufficient for cases when the microcode
	 * update is not present or running as guest that don't get TSX_CTRL.
	 */
	if (!(x86_arch_cap_msr & ARCH_CAP_TAA_NO) &&
	    (cpu_has(c, X86_FEATURE_RTM) ||
	     (x86_arch_cap_msr & ARCH_CAP_TSX_CTRL_MSR)))
		setup_force_cpu_bug(X86_BUG_TAA);

	/*
	 * SRBDS affects CPUs which support RDRAND or RDSEED and are listed
	 * in the vulnerability blacklist.
	 *
	 * Some of the implications and mitigation of Shared Buffers Data
	 * Sampling (SBDS) are similar to SRBDS. Give SBDS same treatment as
	 * SRBDS.
	 */
	if ((cpu_has(c, X86_FEATURE_RDRAND) ||
	     cpu_has(c, X86_FEATURE_RDSEED)) &&
	    cpu_matches(cpu_vuln_blacklist, SRBDS | MMIO_SBDS))
		setup_force_cpu_bug(X86_BUG_SRBDS);

	/*
	 * Processor MMIO Stale Data bug enumeration
	 *
	 * Affected CPU list is generally enough to enumerate the vulnerability,
	 * but for virtualization case check for ARCH_CAP MSR bits also, VMM may
	 * not want the guest to enumerate the bug.
	 *
	 * Set X86_BUG_MMIO_UNKNOWN for CPUs that are neither in the blacklist,
	 * nor in the whitelist and also don't enumerate MSR ARCH_CAP MMIO bits.
	 */
	if (!arch_cap_mmio_immune(x86_arch_cap_msr)) {
		if (cpu_matches(cpu_vuln_blacklist, MMIO))
			setup_force_cpu_bug(X86_BUG_MMIO_STALE_DATA);
		else if (!cpu_matches(cpu_vuln_whitelist, NO_MMIO))
			setup_force_cpu_bug(X86_BUG_MMIO_UNKNOWN);
	}

	if (!cpu_has(c, X86_FEATURE_BTC_NO)) {
		if (cpu_matches(cpu_vuln_blacklist, RETBLEED) || (x86_arch_cap_msr & ARCH_CAP_RSBA))
			setup_force_cpu_bug(X86_BUG_RETBLEED);
	}

	if (cpu_matches(cpu_vuln_blacklist, SMT_RSB))
		setup_force_cpu_bug(X86_BUG_SMT_RSB);

	if (!cpu_has(c, X86_FEATURE_SRSO_NO)) {
		if (cpu_matches(cpu_vuln_blacklist, SRSO))
			setup_force_cpu_bug(X86_BUG_SRSO);
	}

	/*
	 * Check if CPU is vulnerable to GDS. If running in a virtual machine on
	 * an affected processor, the VMM may have disabled the use of GATHER by
	 * disabling AVX2. The only way to do this in HW is to clear XCR0[2],
	 * which means that AVX will be disabled.
	 */
	if (cpu_matches(cpu_vuln_blacklist, GDS) && !(x86_arch_cap_msr & ARCH_CAP_GDS_NO) &&
	    boot_cpu_has(X86_FEATURE_AVX))
		setup_force_cpu_bug(X86_BUG_GDS);

	if (vulnerable_to_rfds(x86_arch_cap_msr))
		setup_force_cpu_bug(X86_BUG_RFDS);

	/* When virtualized, eIBRS could be hidden, assume vulnerable */
	if (!(x86_arch_cap_msr & ARCH_CAP_BHI_NO) &&
	    !cpu_matches(cpu_vuln_whitelist, NO_BHI) &&
	    (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED) ||
	     boot_cpu_has(X86_FEATURE_HYPERVISOR)))
		setup_force_cpu_bug(X86_BUG_BHI);

	if (cpu_has(c, X86_FEATURE_AMD_IBPB) && !cpu_has(c, X86_FEATURE_AMD_IBPB_RET))
		setup_force_cpu_bug(X86_BUG_IBPB_NO_RET);

	if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
		return;

	/* Rogue Data Cache Load? No! */
	if (x86_arch_cap_msr & ARCH_CAP_RDCL_NO)
		return;

	setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);

	if (cpu_matches(cpu_vuln_whitelist, NO_L1TF))
		return;

	setup_force_cpu_bug(X86_BUG_L1TF);
}
|
|
|
|
|
2018-07-19 13:55:28 -07:00
|
|
|
/*
|
|
|
|
* The NOPL instruction is supposed to exist on all CPUs of family >= 6;
|
|
|
|
* unfortunately, that's not true in practice because of early VIA
|
|
|
|
* chips and (more importantly) broken virtualizers that are not easy
|
|
|
|
* to detect. In the latter case it doesn't even *fail* reliably, so
|
|
|
|
* probing for it doesn't even work. Disable it completely on 32-bit
|
|
|
|
* unless we can find a reliable way to detect all the broken cases.
|
|
|
|
* Enable it explicitly on 64-bit for non-constant inputs of cpu_has().
|
|
|
|
*/
|
2018-07-19 13:55:29 -07:00
|
|
|
static void detect_nopl(void)
|
2018-07-19 13:55:28 -07:00
|
|
|
{
|
|
|
|
#ifdef CONFIG_X86_32
|
2018-07-19 13:55:29 -07:00
|
|
|
setup_clear_cpu_cap(X86_FEATURE_NOPL);
|
2018-07-19 13:55:28 -07:00
|
|
|
#else
|
2018-07-19 13:55:29 -07:00
|
|
|
setup_force_cpu_cap(X86_FEATURE_NOPL);
|
2018-07-19 13:55:28 -07:00
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
2020-09-21 14:56:38 -07:00
|
|
|
/*
 * We parse cpu parameters early because fpu__init_system() is executed
 * before parse_early_param().
 */
static void __init cpu_parse_early_param(void)
{
	char arg[128];
	char *argptr = arg, *opt;
	int arglen, taint = 0;

#ifdef CONFIG_X86_32
	/*
	 * Note: this 'if' has no braces; exactly one of the two #ifdef'ed
	 * statements below is its body, depending on CONFIG_MATH_EMULATION.
	 */
	if (cmdline_find_option_bool(boot_command_line, "no387"))
#ifdef CONFIG_MATH_EMULATION
		setup_clear_cpu_cap(X86_FEATURE_FPU);
#else
		pr_err("Option 'no387' required CONFIG_MATH_EMULATION enabled.\n");
#endif

	if (cmdline_find_option_bool(boot_command_line, "nofxsr"))
		setup_clear_cpu_cap(X86_FEATURE_FXSR);
#endif

	if (cmdline_find_option_bool(boot_command_line, "noxsave"))
		setup_clear_cpu_cap(X86_FEATURE_XSAVE);

	if (cmdline_find_option_bool(boot_command_line, "noxsaveopt"))
		setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);

	if (cmdline_find_option_bool(boot_command_line, "noxsaves"))
		setup_clear_cpu_cap(X86_FEATURE_XSAVES);

	if (cmdline_find_option_bool(boot_command_line, "nousershstk"))
		setup_clear_cpu_cap(X86_FEATURE_USER_SHSTK);

	/* Minimize the gap between FRED is available and available but disabled. */
	arglen = cmdline_find_option(boot_command_line, "fred", arg, sizeof(arg));
	/* FRED is opt-in: anything other than exactly "fred=on" disables it. */
	if (arglen != 2 || strncmp(arg, "on", 2))
		setup_clear_cpu_cap(X86_FEATURE_FRED);

	arglen = cmdline_find_option(boot_command_line, "clearcpuid", arg, sizeof(arg));
	if (arglen <= 0)
		return;

	pr_info("Clearing CPUID bits:");

	/* Walk the comma-separated clearcpuid= list; strsep() advances argptr. */
	while (argptr) {
		bool found __maybe_unused = false;
		unsigned int bit;

		opt = strsep(&argptr, ",");

		/*
		 * Handle naked numbers first for feature flags which don't
		 * have names.
		 */
		if (!kstrtouint(opt, 10, &bit)) {
			/* Silently ignore out-of-range bit numbers. */
			if (bit < NCAPINTS * 32) {

				/* empty-string, i.e., ""-defined feature flags */
				if (!x86_cap_flags[bit])
					pr_cont(" " X86_CAP_FMT_NUM, x86_cap_flag_num(bit));
				else
					pr_cont(" " X86_CAP_FMT, x86_cap_flag(bit));

				setup_clear_cpu_cap(bit);
				taint++;
			}
			/*
			 * The assumption is that there are no feature names with only
			 * numbers in the name thus go to the next argument.
			 */
			continue;
		}

		/* Not a number: linear search of the feature-name table. */
		for (bit = 0; bit < 32 * NCAPINTS; bit++) {
			if (!x86_cap_flag(bit))
				continue;

			if (strcmp(x86_cap_flag(bit), opt))
				continue;

			pr_cont(" %s", opt);
			setup_clear_cpu_cap(bit);
			taint++;
			found = true;
			break;
		}

		if (!found)
			pr_cont(" (unknown: %s)", opt);
	}
	pr_cont("\n");

	/* Clearing CPUID bits puts the machine out of spec: taint the kernel. */
	if (taint)
		add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
}
|
|
|
|
|
2008-02-24 03:58:13 -07:00
|
|
|
/*
 * Do minimum CPU detection early.
 * Fields really needed: vendor, cpuid_level, family, model, mask,
 * cache alignment.
 * The others are not touched to avoid unwanted side effects.
 *
 * WARNING: this function is only called on the boot CPU.  Don't add code
 * here that is supposed to run on all CPUs.
 */
static void __init early_identify_cpu(struct cpuinfo_x86 *c)
{
	memset(&c->x86_capability, 0, sizeof(c->x86_capability));
	c->extended_cpuid_level = 0;

	if (!have_cpuid_p())
		identify_cpu_without_cpuid(c);

	/* cyrix could have cpuid enabled via c_identify()*/
	if (have_cpuid_p()) {
		cpu_detect(c);
		get_cpu_vendor(c);
		intel_unlock_cpuid_leafs(c);
		get_cpu_cap(c);
		setup_force_cpu_cap(X86_FEATURE_CPUID);
		/*
		 * Must run right after X86_FEATURE_CPUID is settled and
		 * before c_early_init(), which may shrink x86_phys_bits
		 * (e.g. for memory encryption); MTRR setup consumes the
		 * result.
		 */
		get_cpu_address_sizes(c);
		cpu_parse_early_param();

		cpu_init_topology(c);

		/* Vendor hook (see struct cpu_dev), optional. */
		if (this_cpu->c_early_init)
			this_cpu->c_early_init(c);

		c->cpu_index = 0;
		filter_cpuid_features(c, false);

		/* Boot-CPU-only vendor hook, optional. */
		if (this_cpu->c_bsp_init)
			this_cpu->c_bsp_init(c);
	} else {
		setup_clear_cpu_cap(X86_FEATURE_CPUID);
		get_cpu_address_sizes(c);
		cpu_init_topology(c);
	}

	setup_force_cpu_cap(X86_FEATURE_ALWAYS);

	cpu_set_bug_bits(c);

	sld_setup(c);

#ifdef CONFIG_X86_32
	/*
	 * Regardless of whether PCID is enumerated, the SDM says
	 * that it can't be enabled in 32-bit mode.
	 */
	setup_clear_cpu_cap(X86_FEATURE_PCID);
#endif

	/*
	 * Later in the boot process pgtable_l5_enabled() relies on
	 * cpu_feature_enabled(X86_FEATURE_LA57). If 5-level paging is not
	 * enabled by this point we need to clear the feature bit to avoid
	 * false-positives at the later stage.
	 *
	 * pgtable_l5_enabled() can be false here for several reasons:
	 * - 5-level paging is disabled compile-time;
	 * - it's 32-bit kernel;
	 * - machine doesn't support 5-level paging;
	 * - user specified 'no5lvl' in kernel command line.
	 */
	if (!pgtable_l5_enabled())
		setup_clear_cpu_cap(X86_FEATURE_LA57);

	detect_nopl();
}
|
|
|
|
|
2008-09-04 12:09:44 -07:00
|
|
|
/*
 * Register every linker-collected struct cpu_dev (bounded by
 * X86_VENDOR_NUM slots in cpu_devs[]) and then run minimal detection
 * on the boot CPU.  With CONFIG_PROCESSOR_SELECT, also print the
 * vendor identification strings of each supported CPU driver.
 */
void __init early_cpu_init(void)
{
	const struct cpu_dev *const *pdev;
	int nr = 0;

#ifdef CONFIG_PROCESSOR_SELECT
	pr_info("KERNEL supported cpus:\n");
#endif

	for (pdev = __x86_cpu_dev_start; pdev < __x86_cpu_dev_end; pdev++) {
		const struct cpu_dev *dev = *pdev;

		if (nr >= X86_VENDOR_NUM)
			break;
		cpu_devs[nr++] = dev;

#ifdef CONFIG_PROCESSOR_SELECT
		{
			unsigned int i;

			for (i = 0; i < 2; i++) {
				if (dev->c_ident[i])
					pr_info(" %s %s\n", dev->c_vendor,
						dev->c_ident[i]);
			}
		}
#endif
	}
	early_identify_cpu(&boot_cpu_data);
}
|
2008-01-30 05:33:32 -07:00
|
|
|
|
2021-10-21 03:47:44 -07:00
|
|
|
/*
 * Probe whether loading a null selector into %fs clears FS_BASE.
 * Returns true if the base was cleared (Intel-like behavior).
 *
 * NOTE: the exact sequence below matters — plant a non-zero base,
 * load the null selector, read the base back, then restore the
 * original base before returning.
 */
static bool detect_null_seg_behavior(void)
{
	/*
	 * Empirically, writing zero to a segment selector on AMD does
	 * not clear the base, whereas writing zero to a segment
	 * selector on Intel does clear the base.  Intel's behavior
	 * allows slightly faster context switches in the common case
	 * where GS is unused by the prev and next threads.
	 *
	 * Since neither vendor documents this anywhere that I can see,
	 * detect it directly instead of hard-coding the choice by
	 * vendor.
	 *
	 * I've designated AMD's behavior as the "bug" because it's
	 * counterintuitive and less friendly.
	 */

	unsigned long old_base, tmp;
	rdmsrl(MSR_FS_BASE, old_base);
	wrmsrl(MSR_FS_BASE, 1);		/* sentinel value that a "clear" would wipe */
	loadsegment(fs, 0);		/* load the null selector */
	rdmsrl(MSR_FS_BASE, tmp);
	wrmsrl(MSR_FS_BASE, old_base);	/* restore caller's FS base */
	return tmp == 0;
}
|
|
|
|
|
|
|
|
/*
 * Decide whether to set X86_BUG_NULL_SEG (loading a null segment
 * selector does not clear the segment base) for this CPU.
 *
 * Guard ordering matters: the runtime MSR probe
 * detect_null_seg_behavior() is only reached for family 0x17/0x18
 * parts that lack both the CPUID bit and the hypervisor flag.
 */
void check_null_seg_clears_base(struct cpuinfo_x86 *c)
{
	/* BUG_NULL_SEG is only relevant with 64bit userspace */
	if (!IS_ENABLED(CONFIG_X86_64))
		return;

	/* CPUID explicitly says null selectors clear the base: not affected. */
	if (cpu_has(c, X86_FEATURE_NULL_SEL_CLR_BASE))
		return;

	/*
	 * CPUID bit above wasn't set. If this kernel is still running
	 * as a HV guest, then the HV has decided not to advertize
	 * that CPUID bit for whatever reason. For example, one
	 * member of the migration pool might be vulnerable. Which
	 * means, the bug is present: set the BUG flag and return.
	 */
	if (cpu_has(c, X86_FEATURE_HYPERVISOR)) {
		set_cpu_bug(c, X86_BUG_NULL_SEG);
		return;
	}

	/*
	 * Zen2 CPUs also have this behaviour, but no CPUID bit.
	 * 0x18 is the respective family for Hygon.
	 */
	if ((c->x86 == 0x17 || c->x86 == 0x18) &&
	    detect_null_seg_behavior())
		return;

	/* All the remaining ones are affected */
	set_cpu_bug(c, X86_BUG_NULL_SEG);
}
|
|
|
|
|
x86: delete __cpuinit usage from all x86 files
The __cpuinit type of throwaway sections might have made sense
some time ago when RAM was more constrained, but now the savings
do not offset the cost and complications. For example, the fix in
commit 5e427ec2d0 ("x86: Fix bit corruption at CPU resume time")
is a good example of the nasty type of bugs that can be created
with improper use of the various __init prefixes.
After a discussion on LKML[1] it was decided that cpuinit should go
the way of devinit and be phased out. Once all the users are gone,
we can then finally remove the macros themselves from linux/init.h.
Note that some harmless section mismatch warnings may result, since
notify_cpu_starting() and cpu_up() are arch independent (kernel/cpu.c)
are flagged as __cpuinit -- so if we remove the __cpuinit from
arch specific callers, we will also get section mismatch warnings.
As an intermediate step, we intend to turn the linux/init.h cpuinit
content into no-ops as early as possible, since that will get rid
of these warnings. In any case, they are temporary and harmless.
This removes all the arch/x86 uses of the __cpuinit macros from
all C files. x86 only had the one __CPUINIT used in assembly files,
and it wasn't paired off with a .previous or a __FINIT, so we can
delete it directly w/o any corresponding additional change there.
[1] https://lkml.org/lkml/2013/5/20/589
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: x86@kernel.org
Acked-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: H. Peter Anvin <hpa@linux.intel.com>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
2013-06-18 15:23:59 -07:00
|
|
|
/*
 * Vendor-independent CPU identification: detect family/model/stepping,
 * vendor, capability bits, address sizes and the default model name.
 * Runs on every CPU (unlike early_identify_cpu(), which is boot-CPU
 * only).  Call order mirrors early_identify_cpu(): vendor before caps,
 * caps before address sizes.
 */
static void generic_identify(struct cpuinfo_x86 *c)
{
	c->extended_cpuid_level = 0;

	if (!have_cpuid_p())
		identify_cpu_without_cpuid(c);

	/* cyrix could have cpuid enabled via c_identify()*/
	if (!have_cpuid_p())
		return;

	cpu_detect(c);

	get_cpu_vendor(c);
	intel_unlock_cpuid_leafs(c);
	get_cpu_cap(c);

	get_cpu_address_sizes(c);

	get_model_name(c); /* Default name */

	/*
	 * ESPFIX is a strange bug.  All real CPUs have it.  Paravirt
	 * systems that run Linux at CPL > 0 may or may not have the
	 * issue, but, even if they have the issue, there's absolutely
	 * nothing we can do about it because we can't use the real IRET
	 * instruction.
	 *
	 * NB: For the time being, only 32-bit kernels support
	 * X86_BUG_ESPFIX as such.  64-bit kernels directly choose
	 * whether to apply espfix using paravirt hooks.  If any
	 * non-paravirt system ever shows up that does *not* have the
	 * ESPFIX issue, we can change this.
	 */
#ifdef CONFIG_X86_32
	set_cpu_bug(c, X86_BUG_ESPFIX);
#endif
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This does the hard work of actually picking apart the CPU stuff...
|
|
|
|
*/
|
x86: delete __cpuinit usage from all x86 files
The __cpuinit type of throwaway sections might have made sense
some time ago when RAM was more constrained, but now the savings
do not offset the cost and complications. For example, the fix in
commit 5e427ec2d0 ("x86: Fix bit corruption at CPU resume time")
is a good example of the nasty type of bugs that can be created
with improper use of the various __init prefixes.
After a discussion on LKML[1] it was decided that cpuinit should go
the way of devinit and be phased out. Once all the users are gone,
we can then finally remove the macros themselves from linux/init.h.
Note that some harmless section mismatch warnings may result, since
notify_cpu_starting() and cpu_up() are arch independent (kernel/cpu.c)
are flagged as __cpuinit -- so if we remove the __cpuinit from
arch specific callers, we will also get section mismatch warnings.
As an intermediate step, we intend to turn the linux/init.h cpuinit
content into no-ops as early as possible, since that will get rid
of these warnings. In any case, they are temporary and harmless.
This removes all the arch/x86 uses of the __cpuinit macros from
all C files. x86 only had the one __CPUINIT used in assembly files,
and it wasn't paired off with a .previous or a __FINIT, so we can
delete it directly w/o any corresponding additional change there.
[1] https://lkml.org/lkml/2013/5/20/589
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: x86@kernel.org
Acked-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: H. Peter Anvin <hpa@linux.intel.com>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
2013-06-18 15:23:59 -07:00
|
|
|
static void identify_cpu(struct cpuinfo_x86 *c)
|
2005-04-16 15:20:36 -07:00
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
c->loops_per_jiffy = loops_per_jiffy;
|
2018-02-13 12:22:08 -07:00
|
|
|
c->x86_cache_size = 0;
|
2005-04-16 15:20:36 -07:00
|
|
|
c->x86_vendor = X86_VENDOR_UNKNOWN;
|
2017-12-31 18:52:10 -07:00
|
|
|
c->x86_model = c->x86_stepping = 0; /* So far unknown... */
|
2005-04-16 15:20:36 -07:00
|
|
|
c->x86_vendor_id[0] = '\0'; /* Unset */
|
|
|
|
c->x86_model_id[0] = '\0'; /* Unset */
|
2008-09-07 17:58:50 -07:00
|
|
|
#ifdef CONFIG_X86_64
|
2008-09-04 20:09:13 -07:00
|
|
|
c->x86_clflush_size = 64;
|
2009-03-12 05:37:34 -07:00
|
|
|
c->x86_phys_bits = 36;
|
|
|
|
c->x86_virt_bits = 48;
|
2008-09-04 20:09:13 -07:00
|
|
|
#else
|
|
|
|
c->cpuid_level = -1; /* CPUID not detected */
|
2006-12-06 18:14:05 -07:00
|
|
|
c->x86_clflush_size = 32;
|
2009-03-12 05:37:34 -07:00
|
|
|
c->x86_phys_bits = 32;
|
|
|
|
c->x86_virt_bits = 32;
|
2008-09-04 20:09:13 -07:00
|
|
|
#endif
|
|
|
|
c->x86_cache_alignment = c->x86_clflush_size;
|
2018-10-28 05:58:28 -07:00
|
|
|
memset(&c->x86_capability, 0, sizeof(c->x86_capability));
|
2019-12-20 21:45:04 -07:00
|
|
|
#ifdef CONFIG_X86_VMX_FEATURE_NAMES
|
|
|
|
memset(&c->vmx_capability, 0, sizeof(c->vmx_capability));
|
|
|
|
#endif
|
2005-04-16 15:20:36 -07:00
|
|
|
|
|
|
|
generic_identify(c);
|
|
|
|
|
2024-02-13 14:04:01 -07:00
|
|
|
cpu_parse_topology(c);
|
|
|
|
|
2008-01-30 05:32:49 -07:00
|
|
|
if (this_cpu->c_identify)
|
2005-04-16 15:20:36 -07:00
|
|
|
this_cpu->c_identify(c);
|
|
|
|
|
2016-02-23 16:34:30 -07:00
|
|
|
/* Clear/Set all flags overridden by options, after probe */
|
2017-01-18 12:15:38 -07:00
|
|
|
apply_forced_caps(c);
|
2009-05-15 13:05:16 -07:00
|
|
|
|
x86/barrier: Do not serialize MSR accesses on AMD
AMD does not have the requirement for a synchronization barrier when
acccessing a certain group of MSRs. Do not incur that unnecessary
penalty there.
There will be a CPUID bit which explicitly states that a MFENCE is not
needed. Once that bit is added to the APM, this will be extended with
it.
While at it, move to processor.h to avoid include hell. Untangling that
file properly is a matter for another day.
Some notes on the performance aspect of why this is relevant, courtesy
of Kishon VijayAbraham <Kishon.VijayAbraham@amd.com>:
On a AMD Zen4 system with 96 cores, a modified ipi-bench[1] on a VM
shows x2AVIC IPI rate is 3% to 4% lower than AVIC IPI rate. The
ipi-bench is modified so that the IPIs are sent between two vCPUs in the
same CCX. This also requires to pin the vCPU to a physical core to
prevent any latencies. This simulates the use case of pinning vCPUs to
the thread of a single CCX to avoid interrupt IPI latency.
In order to avoid run-to-run variance (for both x2AVIC and AVIC), the
below configurations are done:
1) Disable Power States in BIOS (to prevent the system from going to
lower power state)
2) Run the system at fixed frequency 2500MHz (to prevent the system
from increasing the frequency when the load is more)
With the above configuration:
*) Performance measured using ipi-bench for AVIC:
Average Latency: 1124.98ns [Time to send IPI from one vCPU to another vCPU]
Cumulative throughput: 42.6759M/s [Total number of IPIs sent in a second from
48 vCPUs simultaneously]
*) Performance measured using ipi-bench for x2AVIC:
Average Latency: 1172.42ns [Time to send IPI from one vCPU to another vCPU]
Cumulative throughput: 40.9432M/s [Total number of IPIs sent in a second from
48 vCPUs simultaneously]
From above, x2AVIC latency is ~4% more than AVIC. However, the expectation is
x2AVIC performance to be better or equivalent to AVIC. Upon analyzing
the perf captures, it is observed significant time is spent in
weak_wrmsr_fence() invoked by x2apic_send_IPI().
With the fix to skip weak_wrmsr_fence()
*) Performance measured using ipi-bench for x2AVIC:
Average Latency: 1117.44ns [Time to send IPI from one vCPU to another vCPU]
Cumulative throughput: 42.9608M/s [Total number of IPIs sent in a second from
48 vCPUs simultaneously]
Comparing the performance of x2AVIC with and without the fix, it can be seen
the performance improves by ~4%.
Performance captured using an unmodified ipi-bench using the 'mesh-ipi' option
with and without weak_wrmsr_fence() on a Zen4 system also showed significant
performance improvement without weak_wrmsr_fence(). The 'mesh-ipi' option ignores
CCX or CCD and just picks random vCPU.
Average throughput (10 iterations) with weak_wrmsr_fence(),
Cumulative throughput: 4933374 IPI/s
Average throughput (10 iterations) without weak_wrmsr_fence(),
Cumulative throughput: 6355156 IPI/s
[1] https://github.com/bytedance/kvm-utils/tree/master/microbenchmark/ipi-bench
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Link: https://lore.kernel.org/r/20230622095212.20940-1-bp@alien8.de
2023-10-27 05:24:16 -07:00
|
|
|
/*
|
|
|
|
* Set default APIC and TSC_DEADLINE MSR fencing flag. AMD and
|
|
|
|
* Hygon will clear it in ->c_init() below.
|
|
|
|
*/
|
|
|
|
set_cpu_cap(c, X86_FEATURE_APIC_MSRS_FENCE);
|
|
|
|
|
2005-04-16 15:20:36 -07:00
|
|
|
/*
|
|
|
|
* Vendor-specific initialization. In this section we
|
|
|
|
* canonicalize the feature flags, meaning if there are
|
|
|
|
* features a certain CPU supports which CPUID doesn't
|
|
|
|
* tell us, CPUID claiming incorrect flags, or other bugs,
|
|
|
|
* we handle them here.
|
|
|
|
*
|
|
|
|
* At the end of this section, c->x86_capability better
|
|
|
|
* indicate the features this CPU genuinely supports!
|
|
|
|
*/
|
|
|
|
if (this_cpu->c_init)
|
|
|
|
this_cpu->c_init(c);
|
|
|
|
|
|
|
|
/* Disable the PN if appropriate */
|
|
|
|
squash_the_stupid_serial_number(c);
|
|
|
|
|
2017-11-05 19:27:54 -07:00
|
|
|
/* Set up SMEP/SMAP/UMIP */
|
2012-09-26 18:02:28 -07:00
|
|
|
setup_smep(c);
|
|
|
|
setup_smap(c);
|
2017-11-05 19:27:54 -07:00
|
|
|
setup_umip(c);
|
2012-09-26 18:02:28 -07:00
|
|
|
|
2020-05-28 13:13:48 -07:00
|
|
|
/* Enable FSGSBASE instructions if available. */
|
2020-05-28 13:13:59 -07:00
|
|
|
if (cpu_has(c, X86_FEATURE_FSGSBASE)) {
|
2020-05-28 13:13:58 -07:00
|
|
|
cr4_set_bits(X86_CR4_FSGSBASE);
|
2020-05-28 13:13:59 -07:00
|
|
|
elf_hwcap2 |= HWCAP2_FSGSBASE;
|
|
|
|
}
|
2020-05-28 13:13:48 -07:00
|
|
|
|
2005-04-16 15:20:36 -07:00
|
|
|
/*
|
2009-03-14 00:46:17 -07:00
|
|
|
* The vendor-specific functions might have changed features.
|
|
|
|
* Now we do "generic changes."
|
2005-04-16 15:20:36 -07:00
|
|
|
*/
|
|
|
|
|
2009-01-23 18:20:50 -07:00
|
|
|
/* Filter out anything that depends on CPUID levels we don't have */
|
|
|
|
filter_cpuid_features(c, true);
|
|
|
|
|
2005-04-16 15:20:36 -07:00
|
|
|
/* If the model name is still unset, do table lookup. */
|
2008-02-24 03:58:13 -07:00
|
|
|
if (!c->x86_model_id[0]) {
|
2009-03-12 05:08:49 -07:00
|
|
|
const char *p;
|
2005-04-16 15:20:36 -07:00
|
|
|
p = table_lookup_model(c);
|
2008-02-24 03:58:13 -07:00
|
|
|
if (p)
|
2005-04-16 15:20:36 -07:00
|
|
|
strcpy(c->x86_model_id, p);
|
|
|
|
else
|
|
|
|
/* Last resort... */
|
|
|
|
sprintf(c->x86_model_id, "%02x/%02x",
|
2006-03-23 03:59:36 -07:00
|
|
|
c->x86, c->x86_model);
|
2005-04-16 15:20:36 -07:00
|
|
|
}
|
|
|
|
|
2011-07-31 14:02:19 -07:00
|
|
|
x86_init_rdrand(c);
|
2016-02-12 14:02:29 -07:00
|
|
|
setup_pku(c);
|
2022-03-08 08:30:35 -07:00
|
|
|
setup_cet(c);
|
2009-05-09 23:47:42 -07:00
|
|
|
|
|
|
|
/*
|
2016-02-23 16:34:30 -07:00
|
|
|
* Clear/Set all flags overridden by options, need do it
|
2009-05-09 23:47:42 -07:00
|
|
|
* before following smp all cpus cap AND.
|
|
|
|
*/
|
2017-01-18 12:15:38 -07:00
|
|
|
apply_forced_caps(c);
|
2009-05-09 23:47:42 -07:00
|
|
|
|
2005-04-16 15:20:36 -07:00
|
|
|
/*
|
|
|
|
* On SMP, boot_cpu_data holds the common feature set between
|
|
|
|
* all CPUs; so make sure that we indicate which features are
|
|
|
|
* common between the CPUs. The first time this routine gets
|
|
|
|
* executed, c == &boot_cpu_data.
|
|
|
|
*/
|
2008-02-24 03:58:13 -07:00
|
|
|
if (c != &boot_cpu_data) {
|
2005-04-16 15:20:36 -07:00
|
|
|
/* AND the already accumulated flags with these */
|
2008-09-04 12:09:44 -07:00
|
|
|
for (i = 0; i < NCAPINTS; i++)
|
2005-04-16 15:20:36 -07:00
|
|
|
boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
|
2013-03-20 07:07:23 -07:00
|
|
|
|
|
|
|
/* OR, i.e. replicate the bug flags */
|
|
|
|
for (i = NCAPINTS; i < NCAPINTS + NBUGINTS; i++)
|
|
|
|
c->x86_capability[i] |= boot_cpu_data.x86_capability[i];
|
2005-04-16 15:20:36 -07:00
|
|
|
}
|
|
|
|
|
2022-01-31 16:01:07 -07:00
|
|
|
ppin_init(c);
|
|
|
|
|
2005-04-16 15:20:36 -07:00
|
|
|
/* Init Machine Check Exception if available. */
|
2009-10-16 03:31:32 -07:00
|
|
|
mcheck_cpu_init(c);
|
2008-01-30 05:33:16 -07:00
|
|
|
|
2011-01-23 06:37:41 -07:00
|
|
|
#ifdef CONFIG_NUMA
|
2008-09-04 20:09:13 -07:00
|
|
|
numa_add_cpu(smp_processor_id());
|
|
|
|
#endif
|
2007-05-02 10:27:12 -07:00
|
|
|
}
|
2005-11-07 01:58:42 -07:00
|
|
|
|
2015-03-16 02:32:20 -07:00
|
|
|
/*
 * Set up the CPU state needed to execute SYSENTER/SYSEXIT instructions
 * on 32-bit kernels:
 */
#ifdef CONFIG_X86_32
void enable_sep_cpu(void)
{
	struct tss_struct *tss;
	int cpu;

	/* CPU has no SYSENTER/SYSEXIT support -- nothing to program. */
	if (!boot_cpu_has(X86_FEATURE_SEP))
		return;

	/* Pin to this CPU while touching its per-CPU TSS and MSRs. */
	cpu = get_cpu();
	tss = &per_cpu(cpu_tss_rw, cpu);

	/*
	 * We cache MSR_IA32_SYSENTER_CS's value in the TSS's ss1 field --
	 * see the big comment in struct x86_hw_tss's definition.
	 */

	tss->x86_tss.ss1 = __KERNEL_CS;
	wrmsr(MSR_IA32_SYSENTER_CS, tss->x86_tss.ss1, 0);
	/*
	 * SYSENTER switches to one past the end of this CPU's entry stack
	 * (stacks grow down) and enters at the 32-bit SYSENTER entry point.
	 */
	wrmsr(MSR_IA32_SYSENTER_ESP, (unsigned long)(cpu_entry_stack(cpu) + 1), 0);
	wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long)entry_SYSENTER_32, 0);

	put_cpu();
}
#endif
|
|
|
|
|
2023-08-08 15:03:39 -07:00
|
|
|
/*
 * Run the full CPU identification pass on the boot CPU and perform the
 * boot-CPU-only setup that depends on its results (TLB detection, CR
 * pinning, TSX/TDX/LKGS init).
 */
static __init void identify_boot_cpu(void)
{
	identify_cpu(&boot_cpu_data);

	if (HAS_KERNEL_IBT && cpu_feature_enabled(X86_FEATURE_IBT))
		pr_info("CET detected: Indirect Branch Tracking enabled\n");
#ifdef CONFIG_X86_32
	enable_sep_cpu();
#endif
	cpu_detect_tlb(&boot_cpu_data);
	/* Lock down security-sensitive CR4/CR0 bits from here on. */
	setup_cr_pinning();

	tsx_init();
	tdx_init();
	lkgs_init();
}
|
2005-07-07 17:56:38 -07:00
|
|
|
|
x86: delete __cpuinit usage from all x86 files
The __cpuinit type of throwaway sections might have made sense
some time ago when RAM was more constrained, but now the savings
do not offset the cost and complications. For example, the fix in
commit 5e427ec2d0 ("x86: Fix bit corruption at CPU resume time")
is a good example of the nasty type of bugs that can be created
with improper use of the various __init prefixes.
After a discussion on LKML[1] it was decided that cpuinit should go
the way of devinit and be phased out. Once all the users are gone,
we can then finally remove the macros themselves from linux/init.h.
Note that some harmless section mismatch warnings may result, since
notify_cpu_starting() and cpu_up() are arch independent (kernel/cpu.c)
are flagged as __cpuinit -- so if we remove the __cpuinit from
arch specific callers, we will also get section mismatch warnings.
As an intermediate step, we intend to turn the linux/init.h cpuinit
content into no-ops as early as possible, since that will get rid
of these warnings. In any case, they are temporary and harmless.
This removes all the arch/x86 uses of the __cpuinit macros from
all C files. x86 only had the one __CPUINIT used in assembly files,
and it wasn't paired off with a .previous or a __FINIT, so we can
delete it directly w/o any corresponding additional change there.
[1] https://lkml.org/lkml/2013/5/20/589
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: x86@kernel.org
Acked-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: H. Peter Anvin <hpa@linux.intel.com>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
2013-06-18 15:23:59 -07:00
|
|
|
/*
 * Identify a non-boot CPU and apply the per-CPU speculation-mitigation
 * MSR setup that was decided on the boot CPU.
 *
 * Must never be called with &boot_cpu_data -- that CPU goes through
 * identify_boot_cpu() instead.
 */
void identify_secondary_cpu(struct cpuinfo_x86 *c)
{
	BUG_ON(c == &boot_cpu_data);
	identify_cpu(c);
#ifdef CONFIG_X86_32
	enable_sep_cpu();
#endif
	/* Replicate the boot CPU's mitigation MSR state onto this CPU. */
	x86_spec_ctrl_setup_ap();
	update_srbds_msr();
	if (boot_cpu_has_bug(X86_BUG_GDS))
		update_gds_msr();

	tsx_ap_init();
}
|
|
|
|
|
x86: delete __cpuinit usage from all x86 files
The __cpuinit type of throwaway sections might have made sense
some time ago when RAM was more constrained, but now the savings
do not offset the cost and complications. For example, the fix in
commit 5e427ec2d0 ("x86: Fix bit corruption at CPU resume time")
is a good example of the nasty type of bugs that can be created
with improper use of the various __init prefixes.
After a discussion on LKML[1] it was decided that cpuinit should go
the way of devinit and be phased out. Once all the users are gone,
we can then finally remove the macros themselves from linux/init.h.
Note that some harmless section mismatch warnings may result, since
notify_cpu_starting() and cpu_up() are arch independent (kernel/cpu.c)
are flagged as __cpuinit -- so if we remove the __cpuinit from
arch specific callers, we will also get section mismatch warnings.
As an intermediate step, we intend to turn the linux/init.h cpuinit
content into no-ops as early as possible, since that will get rid
of these warnings. In any case, they are temporary and harmless.
This removes all the arch/x86 uses of the __cpuinit macros from
all C files. x86 only had the one __CPUINIT used in assembly files,
and it wasn't paired off with a .previous or a __FINIT, so we can
delete it directly w/o any corresponding additional change there.
[1] https://lkml.org/lkml/2013/5/20/589
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: x86@kernel.org
Acked-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: H. Peter Anvin <hpa@linux.intel.com>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
2013-06-18 15:23:59 -07:00
|
|
|
void print_cpu_info(struct cpuinfo_x86 *c)
|
2005-04-16 15:20:36 -07:00
|
|
|
{
|
2009-03-12 05:08:49 -07:00
|
|
|
const char *vendor = NULL;
|
2005-04-16 15:20:36 -07:00
|
|
|
|
2009-03-14 00:46:17 -07:00
|
|
|
if (c->x86_vendor < X86_VENDOR_NUM) {
|
2005-04-16 15:20:36 -07:00
|
|
|
vendor = this_cpu->c_vendor;
|
2009-03-14 00:46:17 -07:00
|
|
|
} else {
|
|
|
|
if (c->cpuid_level >= 0)
|
|
|
|
vendor = c->x86_vendor_id;
|
|
|
|
}
|
2005-04-16 15:20:36 -07:00
|
|
|
|
2008-09-19 18:41:16 -07:00
|
|
|
if (vendor && !strstr(c->x86_model_id, vendor))
|
2016-02-01 20:45:02 -07:00
|
|
|
pr_cont("%s ", vendor);
|
2005-04-16 15:20:36 -07:00
|
|
|
|
2008-09-04 12:09:44 -07:00
|
|
|
if (c->x86_model_id[0])
|
2016-02-01 20:45:02 -07:00
|
|
|
pr_cont("%s", c->x86_model_id);
|
2005-04-16 15:20:36 -07:00
|
|
|
else
|
2016-02-01 20:45:02 -07:00
|
|
|
pr_cont("%d86", c->x86);
|
2005-04-16 15:20:36 -07:00
|
|
|
|
2016-02-01 20:45:02 -07:00
|
|
|
pr_cont(" (family: 0x%x, model: 0x%x", c->x86, c->x86_model);
|
2012-09-14 09:37:46 -07:00
|
|
|
|
2017-12-31 18:52:10 -07:00
|
|
|
if (c->x86_stepping || c->cpuid_level >= 0)
|
|
|
|
pr_cont(", stepping: 0x%x)\n", c->x86_stepping);
|
2005-04-16 15:20:36 -07:00
|
|
|
else
|
2016-02-01 20:45:02 -07:00
|
|
|
pr_cont(")\n");
|
2005-04-16 15:20:36 -07:00
|
|
|
}
|
|
|
|
|
2017-10-13 14:56:43 -07:00
|
|
|
/*
 * clearcpuid= was already parsed in cpu_parse_early_param(). This dummy
 * function prevents it from becoming an environment variable for init.
 */
static __init int setup_clearcpuid(char *arg)
{
	/* Return 1: the parameter is consumed, not passed on to init. */
	return 1;
}
__setup("clearcpuid=", setup_clearcpuid);
|
2008-01-30 05:33:21 -07:00
|
|
|
|
2022-09-15 04:11:01 -07:00
|
|
|
/*
 * Frequently accessed per-CPU state (current task, preempt count, top
 * of stack), grouped and cacheline-aligned, initialized for the boot
 * CPU running the init task.
 */
DEFINE_PER_CPU_ALIGNED(struct pcpu_hot, pcpu_hot) = {
	.current_task = &init_task,
	.preempt_count = INIT_PREEMPT_COUNT,
	.top_of_stack = TOP_OF_INIT_STACK,
};
EXPORT_PER_CPU_SYMBOL(pcpu_hot);
/*
 * const-qualified alias of pcpu_hot that lets the compiler treat reads
 * of the task-stable fields as loads from a constant location and
 * eliminate redundant reloads.
 */
EXPORT_PER_CPU_SYMBOL(const_pcpu_hot);
|
2022-09-15 04:11:01 -07:00
|
|
|
|
2008-09-04 20:09:03 -07:00
|
|
|
#ifdef CONFIG_X86_64
|
2019-04-14 09:00:06 -07:00
|
|
|
/*
 * First object in the 64-bit per-CPU area (DEFINE_PER_CPU_FIRST),
 * page-aligned and kept visible for non-C references.
 * NOTE(review): its placement at per-CPU offset 0 is presumably relied
 * on by %gs-relative asm accessors -- confirm before moving it.
 */
DEFINE_PER_CPU_FIRST(struct fixed_percpu_data,
		     fixed_percpu_data) __aligned(PAGE_SIZE) __visible;
EXPORT_PER_CPU_SYMBOL_GPL(fixed_percpu_data);
|
2009-03-14 00:46:17 -07:00
|
|
|
|
2021-11-18 20:58:03 -07:00
|
|
|
static void wrmsrl_cstar(unsigned long val)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* Intel CPUs do not support 32-bit SYSCALL. Writing to MSR_CSTAR
|
|
|
|
* is so far ignored by the CPU, but raises a #VE trap in a TDX
|
|
|
|
* guest. Avoid the pointless write on all Intel CPUs.
|
|
|
|
*/
|
|
|
|
if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
|
|
|
|
wrmsrl(MSR_CSTAR, val);
|
|
|
|
}
|
|
|
|
|
2023-12-05 03:50:22 -07:00
|
|
|
/*
 * Program the SYSCALL (and, when 32-bit compat is enabled, SYSENTER)
 * entry MSRs for the IDT-based (non-FRED) entry path on this CPU.
 */
static inline void idt_syscall_init(void)
{
	/* 64-bit SYSCALL entry point. */
	wrmsrl(MSR_LSTAR, (unsigned long)entry_SYSCALL_64);

	if (ia32_enabled()) {
		wrmsrl_cstar((unsigned long)entry_SYSCALL_compat);
		/*
		 * This only works on Intel CPUs.
		 * On AMD CPUs these MSRs are 32-bit, CPU truncates MSR_IA32_SYSENTER_EIP.
		 * This does not cause SYSENTER to jump to the wrong location, because
		 * AMD doesn't allow SYSENTER in long mode (either 32- or 64-bit).
		 */
		wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
		wrmsrl_safe(MSR_IA32_SYSENTER_ESP,
			    (unsigned long)(cpu_entry_stack(smp_processor_id()) + 1));
		wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)entry_SYSENTER_compat);
	} else {
		/* No compat: route SYSCALL32 to the ignore stub, invalidate SYSENTER. */
		wrmsrl_cstar((unsigned long)entry_SYSCALL32_ignore);
		wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)GDT_ENTRY_INVALID_SEG);
		wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL);
		wrmsrl_safe(MSR_IA32_SYSENTER_EIP, 0ULL);
	}

	/*
	 * Flags to clear on syscall; clear as much as possible
	 * to minimize user space-kernel interference.
	 */
	wrmsrl(MSR_SYSCALL_MASK,
	       X86_EFLAGS_CF|X86_EFLAGS_PF|X86_EFLAGS_AF|
	       X86_EFLAGS_ZF|X86_EFLAGS_SF|X86_EFLAGS_TF|
	       X86_EFLAGS_IF|X86_EFLAGS_DF|X86_EFLAGS_OF|
	       X86_EFLAGS_IOPL|X86_EFLAGS_NT|X86_EFLAGS_RF|
	       X86_EFLAGS_AC|X86_EFLAGS_ID);
}
|
2006-12-06 18:14:02 -07:00
|
|
|
|
2023-12-05 03:50:22 -07:00
|
|
|
/* May not be marked __init: used by software suspend */
void syscall_init(void)
{
	/* The default user and kernel segments */
	wrmsr(MSR_STAR, 0, (__USER32_CS << 16) | __KERNEL_CS);

	/*
	 * Except the IA32_STAR MSR, there is NO need to setup SYSCALL and
	 * SYSENTER MSRs for FRED, because FRED uses the ring 3 FRED
	 * entrypoint for SYSCALL and SYSENTER, and ERETU is the only legit
	 * instruction to return to ring 3 (both sysexit and sysret cause
	 * #UD when FRED is enabled).
	 */
	if (!cpu_feature_enabled(X86_FEATURE_FRED))
		idt_syscall_init();
}
|
|
|
|
|
2009-03-14 00:46:17 -07:00
|
|
|
#else /* CONFIG_X86_64 */
|
2008-09-04 20:09:03 -07:00
|
|
|
|
Kbuild: rename CC_STACKPROTECTOR[_STRONG] config variables
The changes to automatically test for working stack protector compiler
support in the Kconfig files removed the special STACKPROTECTOR_AUTO
option that picked the strongest stack protector that the compiler
supported.
That was all a nice cleanup - it makes no sense to have the AUTO case
now that the Kconfig phase can just determine the compiler support
directly.
HOWEVER.
It also meant that doing "make oldconfig" would now _disable_ the strong
stackprotector if you had AUTO enabled, because in a legacy config file,
the sane stack protector configuration would look like
CONFIG_HAVE_CC_STACKPROTECTOR=y
# CONFIG_CC_STACKPROTECTOR_NONE is not set
# CONFIG_CC_STACKPROTECTOR_REGULAR is not set
# CONFIG_CC_STACKPROTECTOR_STRONG is not set
CONFIG_CC_STACKPROTECTOR_AUTO=y
and when you ran this through "make oldconfig" with the Kbuild changes,
it would ask you about the regular CONFIG_CC_STACKPROTECTOR (that had
been renamed from CONFIG_CC_STACKPROTECTOR_REGULAR to just
CONFIG_CC_STACKPROTECTOR), but it would think that the STRONG version
used to be disabled (because it was really enabled by AUTO), and would
disable it in the new config, resulting in:
CONFIG_HAVE_CC_STACKPROTECTOR=y
CONFIG_CC_HAS_STACKPROTECTOR_NONE=y
CONFIG_CC_STACKPROTECTOR=y
# CONFIG_CC_STACKPROTECTOR_STRONG is not set
CONFIG_CC_HAS_SANE_STACKPROTECTOR=y
That's dangerously subtle - people could suddenly find themselves with
the weaker stack protector setup without even realizing.
The solution here is to just rename not just the old RECULAR stack
protector option, but also the strong one. This does that by just
removing the CC_ prefix entirely for the user choices, because it really
is not about the compiler support (the compiler support now instead
automatially impacts _visibility_ of the options to users).
This results in "make oldconfig" actually asking the user for their
choice, so that we don't have any silent subtle security model changes.
The end result would generally look like this:
CONFIG_HAVE_CC_STACKPROTECTOR=y
CONFIG_CC_HAS_STACKPROTECTOR_NONE=y
CONFIG_STACKPROTECTOR=y
CONFIG_STACKPROTECTOR_STRONG=y
CONFIG_CC_HAS_SANE_STACKPROTECTOR=y
where the "CC_" versions really are about internal compiler
infrastructure, not the user selections.
Acked-by: Masahiro Yamada <yamada.masahiro@socionext.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2018-06-13 20:21:18 -07:00
|
|
|
#ifdef CONFIG_STACKPROTECTOR
|
x86/stackprotector/32: Make the canary into a regular percpu variable
On 32-bit kernels, the stackprotector canary is quite nasty -- it is
stored at %gs:(20), which is nasty because 32-bit kernels use %fs for
percpu storage. It's even nastier because it means that whether %gs
contains userspace state or kernel state while running kernel code
depends on whether stackprotector is enabled (this is
CONFIG_X86_32_LAZY_GS), and this setting radically changes the way
that segment selectors work. Supporting both variants is a
maintenance and testing mess.
Merely rearranging so that percpu and the stack canary
share the same segment would be messy as the 32-bit percpu address
layout isn't currently compatible with putting a variable at a fixed
offset.
Fortunately, GCC 8.1 added options that allow the stack canary to be
accessed as %fs:__stack_chk_guard, effectively turning it into an ordinary
percpu variable. This lets us get rid of all of the code to manage the
stack canary GDT descriptor and the CONFIG_X86_32_LAZY_GS mess.
(That name is special. We could use any symbol we want for the
%fs-relative mode, but for CONFIG_SMP=n, gcc refuses to let us use any
name other than __stack_chk_guard.)
Forcibly disable stackprotector on older compilers that don't support
the new options and turn the stack canary into a percpu variable. The
"lazy GS" approach is now used for all 32-bit configurations.
Also makes load_gs_index() work on 32-bit kernels. On 64-bit kernels,
it loads the GS selector and updates the user GSBASE accordingly. (This
is unchanged.) On 32-bit kernels, it loads the GS selector and updates
GSBASE, which is now always the user base. This means that the overall
effect is the same on 32-bit and 64-bit, which avoids some ifdeffery.
[ bp: Massage commit message. ]
Signed-off-by: Andy Lutomirski <luto@kernel.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
Link: https://lkml.kernel.org/r/c0ff7dba14041c7e5d1cae5d4df052f03759bef3.1613243844.git.luto@kernel.org
2021-02-13 12:19:44 -07:00
|
|
|
DEFINE_PER_CPU(unsigned long, __stack_chk_guard);
|
x86/stackprotector: Work around strict Clang TLS symbol requirements
GCC and Clang both implement stack protector support based on Thread Local
Storage (TLS) variables, and this is used in the kernel to implement per-task
stack cookies, by copying a task's stack cookie into a per-CPU variable every
time it is scheduled in.
Both now also implement -mstack-protector-guard-symbol=, which permits the TLS
variable to be specified directly. This is useful because it will allow to
move away from using a fixed offset of 40 bytes into the per-CPU area on
x86_64, which requires a lot of special handling in the per-CPU code and the
runtime relocation code.
However, while GCC is rather lax in its implementation of this command line
option, Clang actually requires that the provided symbol name refers to a TLS
variable (i.e., one declared with __thread), although it also permits the
variable to be undeclared entirely, in which case it will use an implicit
declaration of the right type.
The upshot of this is that Clang will emit the correct references to the stack
cookie variable in most cases, e.g.,
10d: 64 a1 00 00 00 00 mov %fs:0x0,%eax
10f: R_386_32 __stack_chk_guard
However, if a non-TLS definition of the symbol in question is visible in the
same compilation unit (which amounts to the whole of vmlinux if LTO is
enabled), it will drop the per-CPU prefix and emit a load from a bogus
address.
Work around this by using a symbol name that never occurs in C code, and emit
it as an alias in the linker script.
Fixes: 3fb0fdb3bbe7 ("x86/stackprotector/32: Make the canary into a regular percpu variable")
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Brian Gerst <brgerst@gmail.com>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Reviewed-by: Nathan Chancellor <nathan@kernel.org>
Tested-by: Nathan Chancellor <nathan@kernel.org>
Cc: stable@vger.kernel.org
Link: https://github.com/ClangBuiltLinux/linux/issues/1854
Link: https://lore.kernel.org/r/20241105155801.1779119-2-brgerst@gmail.com
2024-11-05 08:57:46 -07:00
|
|
|
#ifndef CONFIG_SMP
|
x86/stackprotector/32: Make the canary into a regular percpu variable
On 32-bit kernels, the stackprotector canary is quite nasty -- it is
stored at %gs:(20), which is nasty because 32-bit kernels use %fs for
percpu storage. It's even nastier because it means that whether %gs
contains userspace state or kernel state while running kernel code
depends on whether stackprotector is enabled (this is
CONFIG_X86_32_LAZY_GS), and this setting radically changes the way
that segment selectors work. Supporting both variants is a
maintenance and testing mess.
Merely rearranging so that percpu and the stack canary
share the same segment would be messy as the 32-bit percpu address
layout isn't currently compatible with putting a variable at a fixed
offset.
Fortunately, GCC 8.1 added options that allow the stack canary to be
accessed as %fs:__stack_chk_guard, effectively turning it into an ordinary
percpu variable. This lets us get rid of all of the code to manage the
stack canary GDT descriptor and the CONFIG_X86_32_LAZY_GS mess.
(That name is special. We could use any symbol we want for the
%fs-relative mode, but for CONFIG_SMP=n, gcc refuses to let us use any
name other than __stack_chk_guard.)
Forcibly disable stackprotector on older compilers that don't support
the new options and turn the stack canary into a percpu variable. The
"lazy GS" approach is now used for all 32-bit configurations.
Also makes load_gs_index() work on 32-bit kernels. On 64-bit kernels,
it loads the GS selector and updates the user GSBASE accordingly. (This
is unchanged.) On 32-bit kernels, it loads the GS selector and updates
GSBASE, which is now always the user base. This means that the overall
effect is the same on 32-bit and 64-bit, which avoids some ifdeffery.
[ bp: Massage commit message. ]
Signed-off-by: Andy Lutomirski <luto@kernel.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
Link: https://lkml.kernel.org/r/c0ff7dba14041c7e5d1cae5d4df052f03759bef3.1613243844.git.luto@kernel.org
2021-02-13 12:19:44 -07:00
|
|
|
EXPORT_PER_CPU_SYMBOL(__stack_chk_guard);
|
2009-02-09 06:17:40 -07:00
|
|
|
#endif
|
x86/stackprotector: Work around strict Clang TLS symbol requirements
GCC and Clang both implement stack protector support based on Thread Local
Storage (TLS) variables, and this is used in the kernel to implement per-task
stack cookies, by copying a task's stack cookie into a per-CPU variable every
time it is scheduled in.
Both now also implement -mstack-protector-guard-symbol=, which permits the TLS
variable to be specified directly. This is useful because it will allow to
move away from using a fixed offset of 40 bytes into the per-CPU area on
x86_64, which requires a lot of special handling in the per-CPU code and the
runtime relocation code.
However, while GCC is rather lax in its implementation of this command line
option, Clang actually requires that the provided symbol name refers to a TLS
variable (i.e., one declared with __thread), although it also permits the
variable to be undeclared entirely, in which case it will use an implicit
declaration of the right type.
The upshot of this is that Clang will emit the correct references to the stack
cookie variable in most cases, e.g.,
10d: 64 a1 00 00 00 00 mov %fs:0x0,%eax
10f: R_386_32 __stack_chk_guard
However, if a non-TLS definition of the symbol in question is visible in the
same compilation unit (which amounts to the whole of vmlinux if LTO is
enabled), it will drop the per-CPU prefix and emit a load from a bogus
address.
Work around this by using a symbol name that never occurs in C code, and emit
it as an alias in the linker script.
Fixes: 3fb0fdb3bbe7 ("x86/stackprotector/32: Make the canary into a regular percpu variable")
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Signed-off-by: Brian Gerst <brgerst@gmail.com>
Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
Reviewed-by: Nathan Chancellor <nathan@kernel.org>
Tested-by: Nathan Chancellor <nathan@kernel.org>
Cc: stable@vger.kernel.org
Link: https://github.com/ClangBuiltLinux/linux/issues/1854
Link: https://lore.kernel.org/r/20241105155801.1779119-2-brgerst@gmail.com
2024-11-05 08:57:46 -07:00
|
|
|
#endif
|
2008-09-04 20:09:03 -07:00
|
|
|
|
2009-03-14 00:46:17 -07:00
|
|
|
#endif /* CONFIG_X86_64 */
|
2007-05-02 10:27:16 -07:00
|
|
|
|
2009-03-13 22:49:49 -07:00
|
|
|
/*
 * Zap the six architecturally usable debug registers
 * (DR0-DR3, DR6, DR7) so no stale breakpoint state survives.
 */
static void clear_all_debug_regs(void)
{
	int regno;

	for (regno = 0; regno < 8; regno++) {
		/* DR4 and DR5 are reserved - skip them */
		if (regno == 4 || regno == 5)
			continue;

		set_debugreg(0, regno);
	}
}
|
2007-05-02 10:27:16 -07:00
|
|
|
|
2010-05-20 19:04:30 -07:00
|
|
|
#ifdef CONFIG_KGDB
/*
 * Restore debug regs if using kgdbwait and you have a kernel debugger
 * connection established.
 */
static void dbg_restore_debug_regs(void)
{
	/* correct_hw_break() re-installs kgdb's hardware breakpoints */
	if (unlikely(kgdb_connected && arch_kgdb_ops.correct_hw_break))
		arch_kgdb_ops.correct_hw_break();
}
#else /* ! CONFIG_KGDB */
#define dbg_restore_debug_regs()
#endif /* ! CONFIG_KGDB */
|
|
|
|
|
2019-11-11 15:03:17 -07:00
|
|
|
/*
 * Set up the per-CPU facilities user space uses to discover its CPU and
 * node number: MSR_TSC_AUX (read via RDTSCP/RDPID, when available) and
 * the GDT_ENTRY_CPUNODE segment descriptor, whose limit field encodes
 * both values (see vdso_encode_cpunode()).
 */
static inline void setup_getcpu(int cpu)
{
	unsigned long cpudata = vdso_encode_cpunode(cpu, early_cpu_to_node(cpu));
	struct desc_struct d = { };

	if (boot_cpu_has(X86_FEATURE_RDTSCP) || boot_cpu_has(X86_FEATURE_RDPID))
		wrmsr(MSR_TSC_AUX, cpudata, 0);

	/* Store CPU and node number in limit. */
	d.limit0 = cpudata;
	d.limit1 = cpudata >> 16;

	d.type = 5;		/* RO data, expand down, accessed */
	d.dpl = 3;		/* Visible to user code */
	d.s = 1;		/* Not a system segment */
	d.p = 1;		/* Present */
	d.d = 1;		/* 32-bit */

	write_gdt_entry(get_cpu_gdt_rw(cpu), GDT_ENTRY_CPUNODE, &d, DESCTYPE_S);
}
|
2019-11-11 15:03:17 -07:00
|
|
|
|
2022-11-25 02:42:14 -07:00
|
|
|
#ifdef CONFIG_X86_64
/*
 * Point the TSS Interrupt Stack Table entries at this CPU's dedicated
 * exception stacks so #DF, NMI, #DB, #MC (and #VC under SEV-ES) run on
 * known-good stacks.
 */
static inline void tss_setup_ist(struct tss_struct *tss)
{
	/* Set up the per-CPU TSS IST stacks */
	tss->x86_tss.ist[IST_INDEX_DF] = __this_cpu_ist_top_va(DF);
	tss->x86_tss.ist[IST_INDEX_NMI] = __this_cpu_ist_top_va(NMI);
	tss->x86_tss.ist[IST_INDEX_DB] = __this_cpu_ist_top_va(DB);
	tss->x86_tss.ist[IST_INDEX_MCE] = __this_cpu_ist_top_va(MCE);
	/* Only mapped when SEV-ES is active */
	tss->x86_tss.ist[IST_INDEX_VC] = __this_cpu_ist_top_va(VC);
}
#else /* CONFIG_X86_64 */
/* 32-bit kernels have no IST mechanism - nothing to do. */
static inline void tss_setup_ist(struct tss_struct *tss) { }
#endif /* !CONFIG_X86_64 */
|
2018-09-18 16:08:59 -07:00
|
|
|
|
2019-11-12 13:40:33 -07:00
|
|
|
/*
 * Initialize the TSS I/O bitmap state. The base offset starts out
 * invalid, i.e. all I/O port access is denied; the ioperm()/iopl()
 * machinery installs a real bitmap on demand.
 */
static inline void tss_setup_io_bitmap(struct tss_struct *tss)
{
	tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET_INVALID;

#ifdef CONFIG_X86_IOPL_IOPERM
	tss->io_bitmap.prev_max = 0;
	tss->io_bitmap.prev_sequence = 0;
	/* All-ones == every port denied by default */
	memset(tss->io_bitmap.bitmap, 0xff, sizeof(tss->io_bitmap.bitmap));
	/*
	 * Invalidate the extra array entry past the end of the all
	 * permission bitmap as required by the hardware.
	 */
	tss->io_bitmap.mapall[IO_BITMAP_LONGS] = ~0UL;
#endif
}
|
2018-09-18 16:08:59 -07:00
|
|
|
|
2020-09-07 06:16:08 -07:00
|
|
|
/*
 * Setup everything needed to handle exceptions from the IDT, including the IST
 * exceptions which use paranoid_entry().
 *
 * @boot_cpu: true when invoked for the boot CPU, whose FRED state was
 *            already enabled during early boot and must not be set up again.
 */
void cpu_init_exception_handling(bool boot_cpu)
{
	struct tss_struct *tss = this_cpu_ptr(&cpu_tss_rw);
	int cpu = raw_smp_processor_id();

	/* paranoid_entry() gets the CPU number from the GDT */
	setup_getcpu(cpu);

	/* For IDT mode, IST vectors need to be set in TSS. */
	if (!cpu_feature_enabled(X86_FEATURE_FRED))
		tss_setup_ist(tss);
	tss_setup_io_bitmap(tss);
	set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);

	load_TR_desc();

	/* GHCB needs to be setup to handle #VC. */
	setup_ghcb();

	if (cpu_feature_enabled(X86_FEATURE_FRED)) {
		/* The boot CPU has enabled FRED during early boot */
		if (!boot_cpu)
			cpu_init_fred_exceptions();

		cpu_init_fred_rsps();
	} else {
		load_current_idt();
	}
}
|
|
|
|
|
2024-07-09 08:40:48 -07:00
|
|
|
/*
 * Replace the early exception setup on the boot CPU: switch to FRED when
 * the CPU supports it, otherwise install the early page-fault IDT entry.
 */
void __init cpu_init_replace_early_idt(void)
{
	if (cpu_feature_enabled(X86_FEATURE_FRED))
		cpu_init_fred_exceptions();
	else
		idt_setup_early_pf();
}
|
|
|
|
|
2007-05-02 10:27:10 -07:00
|
|
|
/*
 * cpu_init() initializes state that is per-CPU. Some data is already
 * initialized (naturally) in the bootstrap process, such as the GDT. We
 * reload it nevertheless, this function acts as a 'CPU state barrier',
 * nothing should get across.
 */
void cpu_init(void)
{
	struct task_struct *cur = current;
	int cpu = raw_smp_processor_id();

#ifdef CONFIG_NUMA
	/* Adopt the early NUMA mapping if no node has been set yet */
	if (this_cpu_read(numa_node) == 0 &&
	    early_cpu_to_node(cpu) != NUMA_NO_NODE)
		set_numa_node(early_cpu_to_node(cpu));
#endif
	pr_debug("Initializing CPU#%d\n", cpu);

	/* Clear legacy/debug CR4 bits where the features exist */
	if (IS_ENABLED(CONFIG_X86_64) || cpu_feature_enabled(X86_FEATURE_VME) ||
	    boot_cpu_has(X86_FEATURE_TSC) || boot_cpu_has(X86_FEATURE_DE))
		cr4_clear_bits(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);

	if (IS_ENABLED(CONFIG_X86_64)) {
		loadsegment(fs, 0);
		memset(cur->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
		syscall_init();

		wrmsrl(MSR_FS_BASE, 0);
		wrmsrl(MSR_KERNEL_GS_BASE, 0);
		barrier();

		x2apic_setup();

		intel_posted_msi_init();
	}

	/* This CPU runs on init_mm; take a reference and flush TLB state */
	mmgrab(&init_mm);
	cur->active_mm = &init_mm;
	BUG_ON(cur->mm);
	initialize_tlbstate_and_flush();
	enter_lazy_tlb(&init_mm, cur);

	/*
	 * sp0 points to the entry trampoline stack regardless of what task
	 * is running.
	 */
	load_sp0((unsigned long)(cpu_entry_stack(cpu) + 1));

	load_mm_ldt(&init_mm);

	clear_all_debug_regs();
	dbg_restore_debug_regs();

	doublefault_init_cpu_tss();

	if (is_uv_system())
		uv_cpu_init();

	/* Switch to the read-only fixmap GDT alias */
	load_fixmap_gdt(cpu);
}
|
|
|
|
|
2022-05-25 09:12:30 -07:00
|
|
|
#ifdef CONFIG_MICROCODE_LATE_LOADING
/**
 * store_cpu_caps() - Store a snapshot of CPU capabilities
 * @curr_info: Pointer where to store it
 *
 * Returns: None
 */
void store_cpu_caps(struct cpuinfo_x86 *curr_info)
{
	/* Reload CPUID max function as it might've changed. */
	curr_info->cpuid_level = cpuid_eax(0);

	/* Copy all capability leafs and pick up the synthetic ones. */
	memcpy(&curr_info->x86_capability, &boot_cpu_data.x86_capability,
	       sizeof(curr_info->x86_capability));

	/* Get the hardware CPUID leafs */
	get_cpu_cap(curr_info);
}
|
|
|
|
|
2023-01-09 08:35:50 -07:00
|
|
|
/**
 * microcode_check() - Check if any CPU capabilities changed after an update.
 * @prev_info: CPU capabilities stored before an update.
 *
 * The microcode loader calls this upon late microcode load to recheck features,
 * only when microcode has been updated. Caller holds the CPU hotplug lock.
 *
 * Return: None
 */
void microcode_check(struct cpuinfo_x86 *prev_info)
{
	struct cpuinfo_x86 curr_info;

	perf_check_microcode();

	amd_check_microcode();

	/* Snapshot capabilities after the update for comparison below */
	store_cpu_caps(&curr_info);

	if (!memcmp(&prev_info->x86_capability, &curr_info.x86_capability,
		    sizeof(prev_info->x86_capability)))
		return;

	pr_warn("x86/CPU: CPU features have changed after loading microcode, but might not take effect.\n");
	pr_warn("x86/CPU: Please consider either early loading through initrd/built-in or a potential BIOS update.\n");
}
#endif
|
2019-07-22 11:47:17 -07:00
|
|
|
|
|
|
|
/*
 * Invoked from core CPU hotplug code after hotplug operations
 */
void arch_smt_update(void)
{
	/* Handle the speculative execution misfeatures */
	cpu_bugs_smt_update();
	/* Check whether IPI broadcasting can be enabled */
	apic_smt_update();
}
|
2023-06-13 16:39:24 -07:00
|
|
|
|
|
|
|
/*
 * Late boot-CPU finalization: identify the boot CPU, publish SMT
 * topology, select mitigations and the idle routine, apply alternative
 * instruction patching, and finish FPU and memory-encryption setup
 * before devices start performing DMA.
 */
void __init arch_cpu_finalize_init(void)
{
	struct cpuinfo_x86 *c = this_cpu_ptr(&cpu_info);

	identify_boot_cpu();

	select_idle_routine();

	/*
	 * identify_boot_cpu() initialized SMT support information, let the
	 * core code know.
	 */
	cpu_smt_set_num_threads(__max_threads_per_core, __max_threads_per_core);

	if (!IS_ENABLED(CONFIG_SMP)) {
		pr_info("CPU: ");
		print_cpu_info(&boot_cpu_data);
	}

	cpu_select_mitigations();

	arch_smt_update();

	if (IS_ENABLED(CONFIG_X86_32)) {
		/*
		 * Check whether this is a real i386 which is no longer
		 * supported and fixup the utsname.
		 */
		if (boot_cpu_data.x86 < 4)
			panic("Kernel requires i486+ for 'invlpg' and other features");

		init_utsname()->machine[1] =
			'0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
	}

	/*
	 * Must be before alternatives because it might set or clear
	 * feature bits.
	 */
	fpu__init_system();
	fpu__init_cpu();

	/*
	 * Ensure that access to the per CPU representation has the initial
	 * boot CPU configuration.
	 */
	*c = boot_cpu_data;
	c->initialized = true;

	alternative_instructions();

	if (IS_ENABLED(CONFIG_X86_64)) {
		unsigned long USER_PTR_MAX = TASK_SIZE_MAX-1;

		/*
		 * Enable this when LAM is gated on LASS support
		if (cpu_feature_enabled(X86_FEATURE_LAM))
			USER_PTR_MAX = (1ul << 63) - PAGE_SIZE - 1;
		*/
		runtime_const_init(ptr, USER_PTR_MAX);

		/*
		 * Make sure the first 2MB area is not mapped by huge pages
		 * There are typically fixed size MTRRs in there and overlapping
		 * MTRRs into large pages causes slow downs.
		 *
		 * Right now we don't do that with gbpages because there seems
		 * very little benefit for that case.
		 */
		if (!direct_gbpages)
			set_memory_4k((unsigned long)__va(0), 1);
	} else {
		fpu__init_check_bugs();
	}

	/*
	 * This needs to be called before any devices perform DMA
	 * operations that might use the SWIOTLB bounce buffers. It will
	 * mark the bounce buffers as decrypted so that their usage will
	 * not cause "plain-text" data to be decrypted when accessed. It
	 * must be called after late_time_init() so that Hyper-V x86/x64
	 * hypercalls work when the SWIOTLB bounce buffers are decrypted.
	 */
	mem_encrypt_init();
}
|