Re: [PATCH RFC] x86/cpu: fix intermittent lockup on poweroff

From: Thomas Gleixner
Date: Wed Apr 26 2023 - 12:37:08 EST


Tony!

On Wed, Apr 26 2023 at 10:45, Tony Battersby wrote:
> On 4/25/23 17:05, Thomas Gleixner wrote:
> For test #1, I have never used IPI before, so I would have to look into
> how to do that.  Or you could send me a patch to test if you still want
> the test done.  But test #2 produced results, so maybe it is not
> necessary.

I think we can spare that exercise.

> For test #2, I re-enabled native_wbinvd() by reverting the patch that I
> sent, and then I applied the following patch:
>
> diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
> index 375b33ecafa2..1a9b225c85b6 100644
> --- a/arch/x86/kernel/smp.c
> +++ b/arch/x86/kernel/smp.c
> @@ -212,6 +212,7 @@ static void native_stop_other_cpus(int wait)
>  			udelay(1);
>  	}
>  
> +	mdelay(100);
>  	local_irq_save(flags);
>  	disable_local_APIC();
>  	mcheck_cpu_clear(this_cpu_ptr(&cpu_info));
>
> With that I got a successful power-off 10 times in a row.

Thanks for trying this!

The problem really seems to be that the control CPU proceeds with the
shutdown before the other CPUs have actually finished, and depending on
timing that causes the wreckage. Otherwise the mdelay(100) would not
have helped at all.

But looking at it, that num_online_cpus() == 1 check in
stop_other_cpus() is fragile as hell independent of that wbinvd()
issue: the other CPUs remove themselves from the online mask in
stop_this_cpu() before they are actually done.
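
For reference, the ordering in stop_this_cpu() is roughly this
(abridged from arch/x86/kernel/process.c, comments are mine):

void __noreturn stop_this_cpu(void *dummy)
{
	local_irq_disable();

	/* The CPU drops out of the online mask first ... */
	set_cpu_online(smp_processor_id(), false);
	disable_local_APIC();
	mcheck_cpu_clear(this_cpu_ptr(&cpu_info));

	/*
	 * ... and only then, on SME capable systems, spends a
	 * noticeable amount of time in wbinvd(). The control CPU can
	 * therefore observe num_online_cpus() == 1 while this CPU is
	 * still writing back its caches.
	 */
	if (cpuid_eax(0x8000001f) & BIT(0))
		native_wbinvd();

	for (;;)
		native_halt();
}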

Something like the completely untested patch below should cure that:
make the control CPU wait on an explicit counter which the other CPUs
only decrement after the wbinvd(), instead of consulting the online
mask.

Thanks,

tglx
---
 arch/x86/include/asm/cpu.h |    2 ++
 arch/x86/kernel/process.c  |   10 ++++++++++
 arch/x86/kernel/smp.c      |   15 ++++++++++++---
 3 files changed, 24 insertions(+), 3 deletions(-)

--- a/arch/x86/include/asm/cpu.h
+++ b/arch/x86/include/asm/cpu.h
@@ -98,4 +98,6 @@ extern u64 x86_read_arch_cap_msr(void);
 int intel_find_matching_signature(void *mc, unsigned int csig, int cpf);
 int intel_microcode_sanity_check(void *mc, bool print_err, int hdr_type);
 
+extern atomic_t stop_cpus_count;
+
 #endif /* _ASM_X86_CPU_H */
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -752,6 +752,8 @@ bool xen_set_default_idle(void)
 }
 #endif
 
+atomic_t stop_cpus_count;
+
 void __noreturn stop_this_cpu(void *dummy)
 {
 	local_irq_disable();
@@ -776,6 +778,14 @@ void __noreturn stop_this_cpu(void *dumm
 	 */
 	if (cpuid_eax(0x8000001f) & BIT(0))
 		native_wbinvd();
+
+	/*
+	 * native_stop_other_cpus() will write to @stop_cpus_count after
+	 * observing that it went down to zero, which will invalidate the
+	 * cacheline on this CPU.
+	 */
+	atomic_dec(&stop_cpus_count);
+
 	for (;;) {
 		/*
 		 * Use native_halt() so that memory contents don't change
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -27,6 +27,7 @@
 #include <asm/mmu_context.h>
 #include <asm/proto.h>
 #include <asm/apic.h>
+#include <asm/cpu.h>
 #include <asm/idtentry.h>
 #include <asm/nmi.h>
 #include <asm/mce.h>
@@ -171,6 +172,8 @@ static void native_stop_other_cpus(int w
 		if (atomic_cmpxchg(&stopping_cpu, -1, safe_smp_processor_id()) != -1)
 			return;
 
+		atomic_set(&stop_cpus_count, num_online_cpus() - 1);
+
 		/* sync above data before sending IRQ */
 		wmb();
 
@@ -183,12 +186,12 @@ static void native_stop_other_cpus(int w
 		 * CPUs reach shutdown state.
 		 */
 		timeout = USEC_PER_SEC;
-		while (num_online_cpus() > 1 && timeout--)
+		while (atomic_read(&stop_cpus_count) > 0 && timeout--)
 			udelay(1);
 	}
 
 	/* if the REBOOT_VECTOR didn't work, try with the NMI */
-	if (num_online_cpus() > 1) {
+	if (atomic_read(&stop_cpus_count) > 0) {
 		/*
 		 * If NMI IPI is enabled, try to register the stop handler
 		 * and send the IPI. In any case try to wait for the other
@@ -208,7 +211,7 @@ static void native_stop_other_cpus(int w
 		 * one or more CPUs do not reach shutdown state.
 		 */
 		timeout = USEC_PER_MSEC * 10;
-		while (num_online_cpus() > 1 && (wait || timeout--))
+		while (atomic_read(&stop_cpus_count) > 0 && (wait || timeout--))
 			udelay(1);
 	}
 
@@ -216,6 +219,12 @@ static void native_stop_other_cpus(int w
 	disable_local_APIC();
 	mcheck_cpu_clear(this_cpu_ptr(&cpu_info));
 	local_irq_restore(flags);
+
+	/*
+	 * Ensure that the cache line is invalidated on the other CPUs. See
+	 * comment vs. SME in stop_this_cpu().
+	 */
+	atomic_set(&stop_cpus_count, INT_MAX);
 }
 
 /*
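
To see the scheme in isolation, here is a minimal userspace sketch of
the same countdown idea (C11 atomics plus POSIX threads; the names are
made up for the illustration and none of the SME cacheline subtleties
apply):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NR_WORKERS	4

static atomic_int stop_count;

static void *worker(void *arg)
{
	(void)arg;

	/*
	 * Stand-in for the expensive final work, i.e. the cache
	 * writeback. The counter is only dropped once that is done.
	 */
	atomic_fetch_sub(&stop_count, 1);
	return NULL;
}

int main(void)
{
	pthread_t tid[NR_WORKERS];

	atomic_store(&stop_count, NR_WORKERS);

	for (int i = 0; i < NR_WORKERS; i++)
		pthread_create(&tid[i], NULL, worker, NULL);

	/*
	 * The control side waits on the explicit count instead of a
	 * flag which the workers clear before they are actually done.
	 */
	while (atomic_load(&stop_count) > 0)
		;

	for (int i = 0; i < NR_WORKERS; i++)
		pthread_join(&tid[i], NULL);

	printf("all workers accounted for\n");
	return 0;
}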