Re: [PATCH V2 4/5] x86/mcheck/AMD: Fix LVT offset configuration for thresholding

From: Borislav Petkov
Date: Sat Jan 16 2016 - 05:38:57 EST


On Fri, Jan 15, 2016 at 05:50:35PM -0600, Aravind Gopalakrishnan wrote:
> For processor families with SMCA feature, the LVT offset
> for threshold interrupts is configured only in MSR 0xC0000410
> and not in each per bank MISC register as was done in earlier
> families.
>
> Fixing the code here to obtain the LVT offset from the correct
> MSR for those families which have SMCA feature enabled.
>
> Signed-off-by: Aravind Gopalakrishnan <Aravind.Gopalakrishnan@xxxxxxx>
> ---
> arch/x86/kernel/cpu/mcheck/mce_amd.c | 34 +++++++++++++++++++++++++++++++++-
> 1 file changed, 33 insertions(+), 1 deletion(-)
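
For anyone skimming the thread, the offset selection this implements boils
down to the following standalone sketch (userspace C, not kernel code; the
mask values and bit positions are assumed from mce_amd.c and this series,
so double-check them against the source):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SMCA_THR_LVT_OFF	0x0000F000u	/* assumed: MSR_CU_DEF_ERR (0xC0000410), bits 15:12 */
#define MASK_LVTOFF_HI		0x00F00000u	/* assumed: MCi_MISC high half, bits 23:20 */

/* SMCA parts read the offset from MSR 0xC0000410, older parts from per-bank MCi_MISC. */
static int threshold_lvt_offset(bool smca, uint32_t cu_def_err_lo, uint32_t misc_high)
{
	if (smca)
		return (cu_def_err_lo & SMCA_THR_LVT_OFF) >> 12;

	return (misc_high & MASK_LVTOFF_HI) >> 20;
}

int main(void)
{
	/* Made-up register snapshots, just to exercise both paths. */
	uint32_t cu_def_err_lo = 0x00002000;	/* offset 2 in bits 15:12 */
	uint32_t misc_high     = 0x00100000;	/* offset 1 in bits 23:20 */

	printf("SMCA offset:   %d\n", threshold_lvt_offset(true, cu_def_err_lo, misc_high));
	printf("legacy offset: %d\n", threshold_lvt_offset(false, cu_def_err_lo, misc_high));
	return 0;
}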

Did a small cleanup on top:

---
From: Borislav Petkov <bp@xxxxxxx>
Subject: [PATCH] x86/mce/AMD: Carve out threshold block preparation

mce_amd_feature_init() was getting pretty fat; carve out the
threshold_block setup into a separate function in order to simplify the
flow and make it more understandable.

No functionality change.

Signed-off-by: Borislav Petkov <bp@xxxxxxx>
Cc: Aravind Gopalakrishnan <Aravind.Gopalakrishnan@xxxxxxx>
---
arch/x86/kernel/cpu/mcheck/mce_amd.c | 88 ++++++++++++++++++++----------------
1 file changed, 50 insertions(+), 38 deletions(-)

diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index b9739b9b9341..57c7b296d1c5 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -267,14 +267,60 @@ static void deferred_error_interrupt_enable(struct cpuinfo_x86 *c)
 	wrmsr(MSR_CU_DEF_ERR, low, high);
 }
 
+static int
+prepare_threshold_block(unsigned int bank, unsigned int block, u32 addr,
+			int offset, u32 misc_high)
+{
+	unsigned int cpu = smp_processor_id();
+	struct threshold_block b;
+	int new;
+
+	if (!block)
+		per_cpu(bank_map, cpu) |= (1 << bank);
+
+	memset(&b, 0, sizeof(b));
+	b.cpu = cpu;
+	b.bank = bank;
+	b.block = block;
+	b.address = addr;
+	b.interrupt_capable = lvt_interrupt_supported(bank, misc_high);
+
+	if (!b.interrupt_capable)
+		goto done;
+
+	b.interrupt_enable = 1;
+
+	if (mce_flags.smca) {
+		u32 smca_low = 0, smca_high = 0;
+
+		/* Gather LVT offset for thresholding */
+		if (rdmsr_safe(MSR_CU_DEF_ERR, &smca_low, &smca_high))
+			goto out;
+
+		new = (smca_low & SMCA_THR_LVT_OFF) >> 12;
+	} else {
+		new = (misc_high & MASK_LVTOFF_HI) >> 20;
+	}
+
+	offset = setup_APIC_mce_threshold(offset, new);
+
+	if ((offset == new) &&
+	    (mce_threshold_vector != amd_threshold_interrupt))
+		mce_threshold_vector = amd_threshold_interrupt;
+
+done:
+	mce_threshold_block_init(&b, offset);
+
+out:
+	return offset;
+}
+
 /* cpu init entry point, called from mce.c with preempt off */
 void mce_amd_feature_init(struct cpuinfo_x86 *c)
 {
-	struct threshold_block b;
-	unsigned int cpu = smp_processor_id();
 	u32 low = 0, high = 0, address = 0;
 	unsigned int bank, block;
-	int offset = -1, new;
+	int offset = -1;
 
 	for (bank = 0; bank < mca_cfg.banks; ++bank) {
 		for (block = 0; block < NR_BLOCKS; ++block) {
@@ -299,41 +345,7 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c)
 			     (high & MASK_LOCKED_HI))
 				continue;
 
-			if (!block)
-				per_cpu(bank_map, cpu) |= (1 << bank);
-
-			memset(&b, 0, sizeof(b));
-			b.cpu = cpu;
-			b.bank = bank;
-			b.block = block;
-			b.address = address;
-			b.interrupt_capable = lvt_interrupt_supported(bank, high);
-
-			if (!b.interrupt_capable)
-				goto init;
-
-			b.interrupt_enable = 1;
-
-			if (mce_flags.smca) {
-				u32 smca_low = 0, smca_high = 0;
-
-				/* Gather LVT offset for thresholding */
-				if (rdmsr_safe(MSR_CU_DEF_ERR, &smca_low, &smca_high))
-					break;
-
-				new = (smca_low & SMCA_THR_LVT_OFF) >> 12;
-			} else {
-				new = (high & MASK_LVTOFF_HI) >> 20;
-			}
-
-			offset = setup_APIC_mce_threshold(offset, new);
-
-			if ((offset == new) &&
-			    (mce_threshold_vector != amd_threshold_interrupt))
-				mce_threshold_vector = amd_threshold_interrupt;
-
-init:
-			mce_threshold_block_init(&b, offset);
+			offset = prepare_threshold_block(bank, block, address, offset, high);
 		}
 	}
 
--
2.3.5

--
Regards/Gruss,
Boris.

ECO tip #101: Trim your mails when you reply.