[RFC][PATCH 25/34] x86/cpu: Move cache bits to global config

From: Dave Hansen <dave.hansen@xxxxxxxxxxxxxxx>
Date: Thu Feb 22 2024 - 13:49:03 EST

x86_cache_bits is established and stored per-cpu despite being a
system-wide value: it records the address space bits used internally by
the caches, and its only readers are the L1TF mitigation code. Move it
from 'struct cpuinfo_x86' to 'x86_config' and add an x86_cache_bits()
helper.

Signed-off-by: Dave Hansen <dave.hansen@xxxxxxxxxxxxxxx>
---

 b/arch/x86/include/asm/processor.h |   10 +++++++---
 b/arch/x86/kernel/cpu/common.c     |    8 ++++----
 b/arch/x86/kvm/mmu/spte.c          |    6 +++---
 3 files changed, 14 insertions(+), 10 deletions(-)
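
To illustrate the shape of the change outside the kernel tree, here is a
minimal, compilable userspace sketch of the pattern (the struct, variable,
and helper names mirror the diff below; MIN_CACHE_BITS and the example
value 34 are made-up stand-ins for bsp_addr_config.min_cache_bits and a
CPUID-reported width):

#include <stdio.h>

typedef unsigned char u8;

/* System-wide settings, established once at boot (previously per-cpu). */
struct x86_sys_config {
	u8 phys_bits;
	u8 cache_bits;
};

static struct x86_sys_config x86_config;

/* Read-only helper; callers never touch the struct directly. */
static inline u8 x86_cache_bits(void)
{
	return x86_config.cache_bits;
}

#define MIN_CACHE_BITS 36	/* stand-in for bsp_addr_config.min_cache_bits */

int main(void)
{
	x86_config.phys_bits = 34;	/* pretend CPUID reported 34 bits */

	/* Same clamping logic as get_cpu_address_sizes() below. */
	x86_config.cache_bits = x86_config.phys_bits;
	if (x86_config.cache_bits < MIN_CACHE_BITS)
		x86_config.cache_bits = MIN_CACHE_BITS;

	printf("cache bits: %d\n", x86_cache_bits());	/* prints 36 */
	return 0;
}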

diff -puN arch/x86/include/asm/processor.h~config-cache_bits arch/x86/include/asm/processor.h
--- a/arch/x86/include/asm/processor.h~config-cache_bits 2024-02-22 10:09:00.768936544 -0800
+++ b/arch/x86/include/asm/processor.h 2024-02-22 10:09:00.772936701 -0800
@@ -154,8 +154,6 @@ struct cpuinfo_x86 {
 	/* Is SMT active on this core? */
 	bool			smt_active;
 	u32			microcode;
-	/* Address space bits used by the cache internally */
-	u8			x86_cache_bits;
 	unsigned		initialized : 1;
 } __randomize_layout;

@@ -195,6 +193,7 @@ struct x86_sys_config {
 	/* Address bits supported by all processors */
 	u8		phys_bits;
 	u8		virt_bits;
+	u8		cache_bits;
 	u16		clflush_size;
 	int		cache_alignment;	/* in bytes */
 };
@@ -241,7 +240,7 @@ extern void cpu_detect(struct cpuinfo_x8

 static inline unsigned long long l1tf_pfn_limit(void)
 {
-	return BIT_ULL(boot_cpu_data.x86_cache_bits - 1 - PAGE_SHIFT);
+	return BIT_ULL(x86_config.cache_bits - 1 - PAGE_SHIFT);
 }

 extern void early_cpu_init(void);
@@ -816,6 +815,11 @@ static inline u8 x86_virt_bits(void)
 	return x86_config.virt_bits;
 }

+static inline u8 x86_cache_bits(void)
+{
+	return x86_config.cache_bits;
+}
+
 static inline u8 x86_clflush_size(void)
 {
 	return x86_config.clflush_size;
diff -puN arch/x86/kernel/cpu/common.c~config-cache_bits arch/x86/kernel/cpu/common.c
--- a/arch/x86/kernel/cpu/common.c~config-cache_bits 2024-02-22 10:09:00.768936544 -0800
+++ b/arch/x86/kernel/cpu/common.c 2024-02-22 10:09:00.772936701 -0800
@@ -1138,15 +1138,15 @@ void get_cpu_address_sizes(struct cpuinf
 	}
 	x86_config.clflush_size = detect_clflush_size(c);

-	c->x86_cache_bits = x86_config.phys_bits;
-	if (c->x86_cache_bits < bsp_addr_config.min_cache_bits)
-		c->x86_cache_bits = bsp_addr_config.min_cache_bits;
+	x86_config.cache_bits = x86_config.phys_bits;
+	if (x86_config.cache_bits < bsp_addr_config.min_cache_bits)
+		x86_config.cache_bits = bsp_addr_config.min_cache_bits;

 	x86_config.cache_alignment = x86_clflush_size();
 	if (bsp_addr_config.cache_align_mult)
 		x86_config.cache_alignment *= bsp_addr_config.cache_align_mult;

-	/* Do this last to avoid affecting ->x86_cache_bits. */
+	/* Do this last to avoid affecting '.cache_bits'. */
 	x86_config.phys_bits -= bsp_addr_config.phys_addr_reduction_bits;
 }

diff -puN arch/x86/kvm/mmu/spte.c~config-cache_bits arch/x86/kvm/mmu/spte.c
--- a/arch/x86/kvm/mmu/spte.c~config-cache_bits 2024-02-22 10:09:00.772936701 -0800
+++ b/arch/x86/kvm/mmu/spte.c 2024-02-22 10:09:00.772936701 -0800
@@ -470,12 +470,12 @@ void kvm_mmu_reset_all_pte_masks(void)
 	shadow_nonpresent_or_rsvd_mask = 0;
 	low_phys_bits = x86_phys_bits();
 	if (boot_cpu_has_bug(X86_BUG_L1TF) &&
-	    !WARN_ON_ONCE(boot_cpu_data.x86_cache_bits >=
+	    !WARN_ON_ONCE(x86_cache_bits() >=
 			  52 - SHADOW_NONPRESENT_OR_RSVD_MASK_LEN)) {
-		low_phys_bits = boot_cpu_data.x86_cache_bits
+		low_phys_bits = x86_cache_bits()
 			- SHADOW_NONPRESENT_OR_RSVD_MASK_LEN;
 		shadow_nonpresent_or_rsvd_mask =
-			rsvd_bits(low_phys_bits, boot_cpu_data.x86_cache_bits - 1);
+			rsvd_bits(low_phys_bits, x86_cache_bits() - 1);
 	}

 	shadow_nonpresent_or_rsvd_lower_gfn_mask =
_