Re: [PATCH v2 05/10] percpu: Wire up cmpxchg128

From: Mark Rutland
Date: Fri Feb 03 2023 - 12:02:25 EST


On Thu, Feb 02, 2023 at 03:50:35PM +0100, Peter Zijlstra wrote:
> In order to replace cmpxchg_double() with the newly minted
> cmpxchg128() family of functions, wire the latter up in
> this_cpu_cmpxchg().
>
> Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
> ---
> arch/arm64/include/asm/percpu.h | 21 +++++++++++++++
> arch/s390/include/asm/percpu.h  | 17 ++++++++++++
> arch/x86/include/asm/percpu.h   | 56 ++++++++++++++++++++++++++++++++++++++++
> include/asm-generic/percpu.h    |  8 +++++
> include/linux/percpu-defs.h     | 20 ++++++++++++--
> 5 files changed, 120 insertions(+), 2 deletions(-)

For arm64:

Acked-by: Mark Rutland <mark.rutland@xxxxxxx>

Mark.

>
> --- a/arch/arm64/include/asm/percpu.h
> +++ b/arch/arm64/include/asm/percpu.h
> @@ -140,6 +140,10 @@ PERCPU_RET_OP(add, add, ldadd)
> * re-enabling preemption for preemptible kernels, but doing that in a way
> * which builds inside a module would mean messing directly with the preempt
> * count. If you do this, peterz and tglx will hunt you down.
> + *
> + * Not to mention it'll break the actual preemption model by missing a
> + * preemption point when TIF_NEED_RESCHED gets set while preemption is
> + * disabled.
> */
> #define this_cpu_cmpxchg_double_8(ptr1, ptr2, o1, o2, n1, n2) \
> ({ \
> @@ -240,6 +244,23 @@ PERCPU_RET_OP(add, add, ldadd)
> #define this_cpu_cmpxchg_8(pcp, o, n) \
> _pcp_protect_return(cmpxchg_relaxed, pcp, o, n)
>
> +#define this_cpu_cmpxchg_16(pcp, o, n) \
> +({ \
> + typedef typeof(pcp) pcp_op_T__; \
> + union { \
> + pcp_op_T__ pot; \
> + u128 val; \
> + } old__, new__, ret__; \
> + pcp_op_T__ *ptr__; \
> + old__.pot = o; \
> + new__.pot = n; \
> + preempt_disable_notrace(); \
> + ptr__ = raw_cpu_ptr(&(pcp)); \
> + ret__.val = cmpxchg128_local((void *)ptr__, old__.val, new__.val); \
> + preempt_enable_notrace(); \
> + ret__.pot; \
> +})
> +
> #ifdef __KVM_NVHE_HYPERVISOR__
> extern unsigned long __hyp_per_cpu_offset(unsigned int cpu);
> #define __per_cpu_offset
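
As an aside: the union punning above is what lets any 16-byte type
travel through a u128 compare-and-exchange. A minimal user-space
sketch of the same trick, assuming an LP64 target (all names here
are invented):

        #include <stdio.h>

        struct pair {
                void *ptr;
                unsigned long cnt;
        };

        /* Pun a 16-byte struct to a u128-sized integer, as the macro does. */
        static unsigned __int128 pun(struct pair p)
        {
                union {
                        struct pair pot;
                        unsigned __int128 val;
                } u = { .pot = p };

                return u.val;
        }

        int main(void)
        {
                struct pair p = { .ptr = &p, .cnt = 42 };
                unsigned __int128 v = pun(p);

                printf("low %#lx high %#lx\n",
                       (unsigned long)v, (unsigned long)(v >> 64));
                return 0;
        }
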
> --- a/arch/s390/include/asm/percpu.h
> +++ b/arch/s390/include/asm/percpu.h
> @@ -148,6 +148,23 @@
> #define this_cpu_cmpxchg_4(pcp, oval, nval) arch_this_cpu_cmpxchg(pcp, oval, nval)
> #define this_cpu_cmpxchg_8(pcp, oval, nval) arch_this_cpu_cmpxchg(pcp, oval, nval)
>
> +#define this_cpu_cmpxchg_16(pcp, oval, nval) \
> +({ \
> + typedef typeof(pcp) pcp_op_T__; \
> + union { \
> + pcp_op_T__ pot; \
> + u128 val; \
> + } old__, new__, ret__; \
> + pcp_op_T__ *ptr__; \
> + old__.pot = oval; \
> + new__.pot = nval; \
> + preempt_disable_notrace(); \
> + ptr__ = raw_cpu_ptr(&(pcp)); \
> + ret__.val = cmpxchg128((void *)ptr__, old__.val, new__.val); \
> + preempt_enable_notrace(); \
> + ret__.pot; \
> +})
> +
> #define arch_this_cpu_xchg(pcp, nval) \
> ({ \
> typeof(pcp) *ptr__; \
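
One thing worth noting for prospective users: this patch only wires
up the cmpxchg operation for 16 bytes, so even observing the current
value has to go through the cmpxchg itself. A hypothetical
kernel-style CAS loop (names invented):

        static DEFINE_PER_CPU(u128, pcp_stamp);

        static void set_stamp(u128 new)
        {
                u128 old = 0, cur;

                /*
                 * this_cpu_read() has no 16-byte case, so the current
                 * value is (re)loaded from the cmpxchg return value.
                 */
                while ((cur = this_cpu_cmpxchg(pcp_stamp, old, new)) != old)
                        old = cur;
        }
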
> --- a/arch/x86/include/asm/percpu.h
> +++ b/arch/x86/include/asm/percpu.h
> @@ -210,6 +210,62 @@ do { \
> (typeof(_var))(unsigned long) pco_old__; \
> })
>
> +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_CMPXCHG64)
> +#define percpu_cmpxchg64_op(size, qual, _var, _oval, _nval) \
> +({ \
> + union { \
> + typeof(_var) var; \
> + struct { \
> + u32 low, high; \
> + }; \
> + } old__, new__; \
> + \
> + old__.var = _oval; \
> + new__.var = _nval; \
> + \
> + asm qual ("cmpxchg8b " __percpu_arg([var]) \
> + : [var] "+m" (_var), \
> + "+a" (old__.low), \
> + "+d" (old__.high) \
> + : "b" (new__.low), \
> + "c" (new__.high) \
> + : "memory"); \
> + \
> + old__.var; \
> +})
> +
> +#define raw_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg64_op(8, , pcp, oval, nval)
> +#define this_cpu_cmpxchg_8(pcp, oval, nval) percpu_cmpxchg64_op(8, volatile, pcp, oval, nval)
> +#endif
> +
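
For reference, the per-cpu variant above can omit the lock prefix
because per-cpu data is only ever touched by its owning CPU and
cmpxchg8b is a single (hence IRQ-atomic) instruction; a plain-memory
version needs lock to be SMP-safe. A rough stand-alone sketch
(32-bit x86, illustration only):

        #include <stdint.h>

        static uint64_t my_cmpxchg8b(uint64_t *ptr, uint64_t old, uint64_t new)
        {
                union {
                        uint64_t full;
                        struct { uint32_t low, high; };
                } o = { old }, n = { new };

                asm volatile("lock cmpxchg8b %[var]"
                             : [var] "+m" (*ptr),
                               "+a" (o.low),
                               "+d" (o.high)
                             : "b" (n.low),
                               "c" (n.high)
                             : "memory");

                return o.full;  /* equals 'old' iff the store happened */
        }
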
> +#ifdef CONFIG_X86_64
> +#define percpu_cmpxchg128_op(size, qual, _var, _oval, _nval) \
> +({ \
> + union { \
> + typeof(_var) var; \
> + struct { \
> + u64 low, high; \
> + }; \
> + } old__, new__; \
> + \
> + old__.var = _oval; \
> + new__.var = _nval; \
> + \
> + asm qual ("cmpxchg16b " __percpu_arg([var]) \
> + : [var] "+m" (_var), \
> + "+a" (old__.low), \
> + "+d" (old__.high) \
> + : "b" (new__.low), \
> + "c" (new__.high) \
> + : "memory"); \
> + \
> + old__.var; \
> +})
> +
> +#define raw_cpu_cmpxchg_16(pcp, oval, nval) percpu_cmpxchg128_op(16, , pcp, oval, nval)
> +#define this_cpu_cmpxchg_16(pcp, oval, nval) percpu_cmpxchg128_op(16, volatile, pcp, oval, nval)
> +#endif
> +
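
Same shape on 64-bit; the extra wrinkle is that cmpxchg16b faults on
operands that are not 16-byte aligned, which naturally-aligned
16-byte per-cpu variables should satisfy. Stand-alone sketch,
illustration only:

        #include <stdint.h>

        static unsigned __int128
        my_cmpxchg16b(unsigned __int128 *ptr,
                      unsigned __int128 old, unsigned __int128 new)
        {
                union {
                        unsigned __int128 full;
                        struct { uint64_t low, high; };
                } o = { old }, n = { new };

                /* *ptr must be 16-byte aligned or cmpxchg16b #GPs */
                asm volatile("lock cmpxchg16b %[var]"
                             : [var] "+m" (*ptr),
                               "+a" (o.low),
                               "+d" (o.high)
                             : "b" (n.low),
                               "c" (n.high)
                             : "memory");

                return o.full;
        }
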
> /*
> * this_cpu_read() makes gcc load the percpu variable every time it is
> * accessed while this_cpu_read_stable() allows the value to be cached.
> --- a/include/asm-generic/percpu.h
> +++ b/include/asm-generic/percpu.h
> @@ -298,6 +298,10 @@ do { \
> #define raw_cpu_cmpxchg_8(pcp, oval, nval) \
> raw_cpu_generic_cmpxchg(pcp, oval, nval)
> #endif
> +#ifndef raw_cpu_cmpxchg_16
> +#define raw_cpu_cmpxchg_16(pcp, oval, nval) \
> + raw_cpu_generic_cmpxchg(pcp, oval, nval)
> +#endif
>
> #ifndef raw_cpu_cmpxchg_double_1
> #define raw_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2) \
> @@ -423,6 +427,10 @@ do { \
> #define this_cpu_cmpxchg_8(pcp, oval, nval) \
> this_cpu_generic_cmpxchg(pcp, oval, nval)
> #endif
> +#ifndef this_cpu_cmpxchg_16
> +#define this_cpu_cmpxchg_16(pcp, oval, nval) \
> + this_cpu_generic_cmpxchg(pcp, oval, nval)
> +#endif
>
> #ifndef this_cpu_cmpxchg_double_1
> #define this_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2) \
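
For completeness, the generic fallback those #ifndefs select is
(roughly) the non-atomic read/compare/store below; the this_cpu_*
flavour additionally wraps it in local_irq_save()/restore(). For a
u128 it leans on the compiler's __int128 support rather than a wide
atomic instruction:

        #define raw_cpu_generic_cmpxchg(pcp, oval, nval)                \
        ({                                                              \
                typeof(pcp) *__p = raw_cpu_ptr(&(pcp));                 \
                typeof(pcp) __ret;                                      \
                __ret = *__p;                                           \
                if (__ret == (oval))                                    \
                        *__p = nval;                                    \
                __ret;                                                  \
        })
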
> --- a/include/linux/percpu-defs.h
> +++ b/include/linux/percpu-defs.h
> @@ -343,6 +343,22 @@ static inline void __this_cpu_preempt_ch
> pscr2_ret__; \
> })
>
> +#define __pcpu_size16_call_return2(stem, variable, ...) \
> +({ \
> + typeof(variable) pscr2_ret__; \
> + __verify_pcpu_ptr(&(variable)); \
> + switch(sizeof(variable)) { \
> + case 1: pscr2_ret__ = stem##1(variable, __VA_ARGS__); break; \
> + case 2: pscr2_ret__ = stem##2(variable, __VA_ARGS__); break; \
> + case 4: pscr2_ret__ = stem##4(variable, __VA_ARGS__); break; \
> + case 8: pscr2_ret__ = stem##8(variable, __VA_ARGS__); break; \
> + case 16: pscr2_ret__ = stem##16(variable, __VA_ARGS__); break; \
> + default: \
> + __bad_size_call_parameter(); break; \
> + } \
> + pscr2_ret__; \
> +})
> +
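
Presumably a separate 16-byte-capable dispatcher (rather than a
case 16 in __pcpu_size_call_return2 itself) is needed because every
case of the switch must compile for every operation, and only
cmpxchg grows _16 implementations here. Stand-alone illustration of
the sizeof-based dispatch (invented names):

        #include <stdio.h>

        static int op8(void)  { return 8; }
        static int op16(void) { return 16; }

        #define dispatch(stem, var)                             \
        ({                                                      \
                int ret__;                                      \
                switch (sizeof(var)) {                          \
                case 8:  ret__ = stem##8();  break;             \
                case 16: ret__ = stem##16(); break;             \
                default: ret__ = -1;         break;             \
                }                                               \
                ret__;                                          \
        })

        int main(void)
        {
                long narrow = 0;
                unsigned __int128 wide = 0;

                /* sizeof() is compile-time: dead cases fold away */
                printf("%d %d\n", dispatch(op, narrow), dispatch(op, wide));
                return 0;
        }
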
> /*
> * Special handling for cmpxchg_double. cmpxchg_double is passed two
> * percpu variables. The first has to be aligned to a double word
> @@ -425,7 +441,7 @@ do { \
> #define raw_cpu_add_return(pcp, val) __pcpu_size_call_return2(raw_cpu_add_return_, pcp, val)
> #define raw_cpu_xchg(pcp, nval) __pcpu_size_call_return2(raw_cpu_xchg_, pcp, nval)
> #define raw_cpu_cmpxchg(pcp, oval, nval) \
> - __pcpu_size_call_return2(raw_cpu_cmpxchg_, pcp, oval, nval)
> + __pcpu_size16_call_return2(raw_cpu_cmpxchg_, pcp, oval, nval)
> #define raw_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
> __pcpu_double_call_return_bool(raw_cpu_cmpxchg_double_, pcp1, pcp2, oval1, oval2, nval1, nval2)
>
> @@ -512,7 +528,7 @@ do { \
> #define this_cpu_add_return(pcp, val) __pcpu_size_call_return2(this_cpu_add_return_, pcp, val)
> #define this_cpu_xchg(pcp, nval) __pcpu_size_call_return2(this_cpu_xchg_, pcp, nval)
> #define this_cpu_cmpxchg(pcp, oval, nval) \
> - __pcpu_size_call_return2(this_cpu_cmpxchg_, pcp, oval, nval)
> + __pcpu_size16_call_return2(this_cpu_cmpxchg_, pcp, oval, nval)
> #define this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
> __pcpu_double_call_return_bool(this_cpu_cmpxchg_double_, pcp1, pcp2, oval1, oval2, nval1, nval2)
>
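
For context, the shape of the cmpxchg_double() replacement this is
building towards, sketched with invented names (the real conversions
come later in the series):

        typedef union {
                struct {
                        void *freelist;
                        unsigned long counter;
                };
                u128 full;
        } pcp_pair_t;

        static DEFINE_PER_CPU(pcp_pair_t, pcp_pair);

        static bool try_swap(pcp_pair_t old, pcp_pair_t new)
        {
                /* one 16-byte cmpxchg instead of cmpxchg_double() */
                return this_cpu_cmpxchg(pcp_pair.full,
                                        old.full, new.full) == old.full;
        }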