Re: [PATCH 2/3] x86/speculation: Support Automatic IBRS

From: Kim Phillips
Date: Fri Nov 11 2022 - 19:46:59 EST


On 11/11/22 6:40 AM, Thadeu Lima de Souza Cascardo wrote:
On Fri, Nov 11, 2022 at 01:09:37PM +0100, Borislav Petkov wrote:
On Mon, Nov 07, 2022 at 04:39:02PM -0600, Kim Phillips wrote:
I've started a version that has AUTOIBRS reuse the SPECTRE_V2_EIBRS
spectre_v2_mitigation enum, but, so far, its change to bugs.c looks
bigger: 58 lines changed vs. 34 (see below).

It can be smaller. You simply do:

if (cpu_has(c, X86_FEATURE_AUTOIBRS))
setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED);

and the rest should just work - see below.

And yes, as Peter says, when the user requests something, the user
should get it. No matter whether it makes sense or not.

OK & thanks.

@@ -1474,11 +1477,19 @@ static void __init spectre_v2_select_mitigation(void)
break;
case SPECTRE_V2_CMD_EIBRS_LFENCE:
- mode = SPECTRE_V2_EIBRS_LFENCE;
+ if (boot_cpu_has(X86_FEATURE_AUTOIBRS)) {
+ pr_err(SPECTRE_V2_EIBRS_AMD_MSG);
+ mode = SPECTRE_V2_EIBRS;
+ } else
+ mode = SPECTRE_V2_EIBRS_LFENCE;
break;
case SPECTRE_V2_CMD_EIBRS_RETPOLINE:
- mode = SPECTRE_V2_EIBRS_RETPOLINE;
+ if (boot_cpu_has(X86_FEATURE_AUTOIBRS)) {
+ pr_err(SPECTRE_V2_EIBRS_AMD_MSG);
+ mode = SPECTRE_V2_EIBRS;
+ } else
+ mode = SPECTRE_V2_EIBRS_RETPOLINE;
break;
}

I am confused here. Isn't the agreement that the user should get what they
asked for? That is, instead of warning and changing the mode to
SPECTRE_V2_EIBRS, the kernel should still use lfence or retpoline as requested?

The point of those options was to protect against Branch History Injection
attacks and Intra-Mode Branch Target Injection attacks. The first one might
not affect the CPUs that support AUTOIBRS, though we haven't heard that
confirmed.

The second one (IMBTI) is very likely still possible with AUTOIBRS, and
retpolines should still protect against those attacks. So users who want to
be paranoid should still be able to opt for "eibrs,retpoline" and have
retpolines enabled.

I've removed the above and have the complete diff below. It includes patch 1/3 and
drops 3/3 for now due to Jim Mattson's comments. After some more testing, I'll
resubmit.
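
For reference, with that hunk dropped, the eibrs* cases in
spectre_v2_select_mitigation() stay as they are in current upstream, so the
user-selected mode is honored as-is, roughly:

	case SPECTRE_V2_CMD_EIBRS:
		mode = SPECTRE_V2_EIBRS;
		break;

	case SPECTRE_V2_CMD_EIBRS_LFENCE:
		mode = SPECTRE_V2_EIBRS_LFENCE;
		break;

	case SPECTRE_V2_CMD_EIBRS_RETPOLINE:
		mode = SPECTRE_V2_EIBRS_RETPOLINE;
		break;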

Thanks,

Kim


diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index a465d5242774..b260a36dc3ef 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -5698,9 +5698,10 @@
retpoline,generic - Retpolines
retpoline,lfence - LFENCE; indirect branch
retpoline,amd - alias for retpoline,lfence
- eibrs - enhanced IBRS
- eibrs,retpoline - enhanced IBRS + Retpolines
- eibrs,lfence - enhanced IBRS + LFENCE
+ eibrs - Enhanced/Auto IBRS
+ autoibrs - Enhanced/Auto IBRS
+ eibrs,retpoline - Enhanced/Auto IBRS + Retpolines
+ eibrs,lfence - Enhanced/Auto IBRS + LFENCE
ibrs - use IBRS to protect kernel
Not specifying this option is equivalent to
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 97669aaf1202..ec9a4eb8e7b9 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -84,7 +84,7 @@
/* CPU types for specific tunings: */
#define X86_FEATURE_K8 ( 3*32+ 4) /* "" Opteron, Athlon64 */
-/* FREE, was #define X86_FEATURE_K7 ( 3*32+ 5) "" Athlon */
+#define X86_FEATURE_AUTOIBRS ( 3*32+ 5) /* AMD Automatic IBRS */
#define X86_FEATURE_P3 ( 3*32+ 6) /* "" P3 */
#define X86_FEATURE_P4 ( 3*32+ 7) /* "" P4 */
#define X86_FEATURE_CONSTANT_TSC ( 3*32+ 8) /* TSC ticks at a constant rate */
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index a3eb4d3e70b8..56e4f3aab31c 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -30,6 +30,7 @@
#define _EFER_SVME 12 /* Enable virtualization */
#define _EFER_LMSLE 13 /* Long Mode Segment Limit Enable */
#define _EFER_FFXSR 14 /* Enable Fast FXSAVE/FXRSTOR */
+#define _EFER_AUTOIBRS 21 /* Enable Automatic IBRS */
#define EFER_SCE (1<<_EFER_SCE)
#define EFER_LME (1<<_EFER_LME)
@@ -38,6 +39,7 @@
#define EFER_SVME (1<<_EFER_SVME)
#define EFER_LMSLE (1<<_EFER_LMSLE)
#define EFER_FFXSR (1<<_EFER_FFXSR)
+#define EFER_AUTOIBRS (1<<_EFER_AUTOIBRS)
/* Intel MSRs. Some also available on other CPUs */
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 66d7addf1784..4060ca8c2c60 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -1229,7 +1229,7 @@ static const char * const spectre_v2_strings[] = {
[SPECTRE_V2_NONE] = "Vulnerable",
[SPECTRE_V2_RETPOLINE] = "Mitigation: Retpolines",
[SPECTRE_V2_LFENCE] = "Mitigation: LFENCE",
- [SPECTRE_V2_EIBRS] = "Mitigation: Enhanced IBRS",
+ [SPECTRE_V2_EIBRS] = "Mitigation: Enhanced / Automatic IBRS",
[SPECTRE_V2_EIBRS_LFENCE] = "Mitigation: Enhanced IBRS + LFENCE",
[SPECTRE_V2_EIBRS_RETPOLINE] = "Mitigation: Enhanced IBRS + Retpolines",
[SPECTRE_V2_IBRS] = "Mitigation: IBRS",
@@ -1247,6 +1247,7 @@ static const struct {
{ "retpoline,lfence", SPECTRE_V2_CMD_RETPOLINE_LFENCE, false },
{ "retpoline,generic", SPECTRE_V2_CMD_RETPOLINE_GENERIC, false },
{ "eibrs", SPECTRE_V2_CMD_EIBRS, false },
+ { "autoibrs", SPECTRE_V2_CMD_EIBRS, false },
{ "eibrs,lfence", SPECTRE_V2_CMD_EIBRS_LFENCE, false },
{ "eibrs,retpoline", SPECTRE_V2_CMD_EIBRS_RETPOLINE, false },
{ "auto", SPECTRE_V2_CMD_AUTO, false },
@@ -1300,7 +1301,7 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
cmd == SPECTRE_V2_CMD_EIBRS_LFENCE ||
cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) &&
!boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
- pr_err("%s selected but CPU doesn't have eIBRS. Switching to AUTO select\n",
+ pr_err("%s selected but CPU doesn't have Enhanced or Automatic IBRS. Switching to AUTO select\n",
mitigation_options[i].option);
return SPECTRE_V2_CMD_AUTO;
}
@@ -1486,8 +1487,12 @@ static void __init spectre_v2_select_mitigation(void)
pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);
if (spectre_v2_in_ibrs_mode(mode)) {
- x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
- write_spec_ctrl_current(x86_spec_ctrl_base, true);
+ if (boot_cpu_has(X86_FEATURE_AUTOIBRS)) {
+ msr_set_bit(MSR_EFER, _EFER_AUTOIBRS);
+ } else {
+ x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
+ write_spec_ctrl_current(x86_spec_ctrl_base, true);
+ }
}
switch (mode) {
@@ -1571,8 +1576,8 @@ static void __init spectre_v2_select_mitigation(void)
/*
* Retpoline protects the kernel, but doesn't protect firmware. IBRS
* and Enhanced IBRS protect firmware too, so enable IBRS around
- * firmware calls only when IBRS / Enhanced IBRS aren't otherwise
- * enabled.
+ * firmware calls only when IBRS / Enhanced / Automatic IBRS aren't
+ * otherwise enabled.
*
* Use "mode" to check Enhanced IBRS instead of boot_cpu_has(), because
* the user might select retpoline on the kernel command line and if
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 423a760fa9de..287b356ccf92 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1340,6 +1340,10 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
if (ia32_cap & ARCH_CAP_IBRS_ALL)
setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED);
+ /* AMD's AutoIBRS is equivalent to Intel's eIBRS - use the Intel flag. */
+ if (cpu_has(c, X86_FEATURE_AUTOIBRS))
+ setup_force_cpu_cap(X86_FEATURE_IBRS_ENHANCED);
+
if (!cpu_matches(cpu_vuln_whitelist, NO_MDS) &&
!(ia32_cap & ARCH_CAP_MDS_NO)) {
setup_force_cpu_bug(X86_BUG_MDS);
diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
index f53944fb8f7f..cef8c3e688b4 100644
--- a/arch/x86/kernel/cpu/scattered.c
+++ b/arch/x86/kernel/cpu/scattered.c
@@ -45,8 +45,10 @@ static const struct cpuid_bit cpuid_bits[] = {
{ X86_FEATURE_CPB, CPUID_EDX, 9, 0x80000007, 0 },
{ X86_FEATURE_PROC_FEEDBACK, CPUID_EDX, 11, 0x80000007, 0 },
{ X86_FEATURE_MBA, CPUID_EBX, 6, 0x80000008, 0 },
+ { X86_FEATURE_AUTOIBRS, CPUID_EAX, 20, 0x80000021, 0 },
{ X86_FEATURE_PERFMON_V2, CPUID_EAX, 0, 0x80000022, 0 },
{ X86_FEATURE_AMD_LBR_V2, CPUID_EAX, 1, 0x80000022, 0 },
+
{ 0, 0, 0, 0, 0 }
};