Re: [PATCH v2 -tip] x86/percpu: Use C for arch_raw_cpu_ptr()

From: Uros Bizjak
Date: Wed Oct 18 2023 - 11:18:11 EST


On Wed, Oct 18, 2023 at 4:46 PM Nadav Amit <namit@xxxxxxxxxx> wrote:

> >> Looks like another case of underspecified functionality where both
> >> compilers differ. Luckily, both DTRT when aliases are hidden in
> >> another TU.
> >
> > Attached is the prototype patch that works for me (together with
> > Linus' FPU switching patch).
>
> In general looks good. See some minor issues below.
>
> > --- a/arch/x86/include/asm/current.h
> > +++ b/arch/x86/include/asm/current.h
> > @@ -36,10 +36,23 @@ static_assert(sizeof(struct pcpu_hot) == 64);
> >
> > DECLARE_PER_CPU_ALIGNED(struct pcpu_hot, pcpu_hot);
> >
> > +/*
> > + *
> > + */
>
> Obviously some further comments are needed to clarify why struct
> pcpu_hot is defined in percpu-hot.c (the GCC manual says: "It is an
> error if the alias target is not defined in the same translation
> unit as the alias", which can be used as part of the explanation.)

Sure.
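
For the comment, the gist would be something like the following
stripped-down sketch (plain C, ignoring the DEFINE/DECLARE_PER_CPU
machinery; the file and symbol names only mirror the patch):

/*
 * percpu-hot.c: GCC requires the alias target to be defined in the
 * same translation unit as the alias, so both symbols live here and
 * every other TU sees only the extern declarations.
 */
struct task_struct;

struct pcpu_hot {
	struct task_struct	*current_task;
	/* ... */
};

struct pcpu_hot pcpu_hot = { 0 };

/*
 * Read-only view of the same storage: loads through const_pcpu_hot
 * can be CSEd, since the compiler may assume the object never
 * changes behind its back.
 */
extern const struct pcpu_hot const_pcpu_hot
	__attribute__((alias("pcpu_hot")));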

>
> > +DECLARE_PER_CPU_ALIGNED(const struct pcpu_hot __percpu_seg_override,
> > +			const_pcpu_hot);
> > +
> > +#ifdef CONFIG_USE_X86_SEG_SUPPORT
> > +static __always_inline struct task_struct *get_current(void)
> > +{
> > +	return const_pcpu_hot.current_task;
> > +}
> > +#else
> > static __always_inline struct task_struct *get_current(void)
> > {
> > 	return this_cpu_read_stable(pcpu_hot.current_task);
> > }
> > +#endif
>
>
> Please consider using IS_ENABLED() to avoid the ifdef’ry.
>
> So this would turn into:
>
> static __always_inline struct task_struct *get_current(void)
> {
> 	if (IS_ENABLED(CONFIG_USE_X86_SEG_SUPPORT))
> 		return const_pcpu_hot.current_task;
>
> 	return this_cpu_read_stable(pcpu_hot.current_task);
> }

I was thinking more of moving the ifdeffery to percpu.h, something
along the lines of the attached part of the patch. This would handle
all current and future stable percpu variables.
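
To illustrate (just my sketch of the intended end result, not part of
the attached diff), users such as get_current() then keep a single
definition and percpu.h picks the right expansion:

/*
 * current.h side, sketch: with the ifdeffery in percpu.h, this one
 * definition covers both configurations.
 *
 * CONFIG_USE_X86_SEG_SUPPORT=y: expands to const_pcpu_hot.current_task
 * otherwise: falls back to the percpu_stable_op() asm read
 */
static __always_inline struct task_struct *get_current(void)
{
	return this_cpu_read_stable(pcpu_hot.current_task);
}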

Thanks,
Uros.
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 54746903b8c3..607f6b8e16ca 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -417,7 +417,6 @@ do { \
 #define this_cpu_read_stable_2(pcp) percpu_stable_op(2, "mov", pcp)
 #define this_cpu_read_stable_4(pcp) percpu_stable_op(4, "mov", pcp)
 #define this_cpu_read_stable_8(pcp) percpu_stable_op(8, "mov", pcp)
-#define this_cpu_read_stable(pcp) __pcpu_size_call_return(this_cpu_read_stable_, pcp)

 #ifdef CONFIG_USE_X86_SEG_SUPPORT

@@ -453,6 +452,8 @@ do { \
 #define this_cpu_write_8(pcp, val) __raw_cpu_write(volatile, pcp, val)
 #endif

+#define this_cpu_read_stable(pcp) const_##pcp
+
 #else /* CONFIG_USE_X86_SEG_SUPPORT */

 #define raw_cpu_read_1(pcp) percpu_from_op(1, , "mov", pcp)
@@ -477,6 +478,9 @@ do { \
 #define this_cpu_write_8(pcp, val) percpu_to_op(8, volatile, "mov", (pcp), val)
 #endif

+#define this_cpu_read_stable(pcp) \
+	__pcpu_size_call_return(this_cpu_read_stable_, pcp)
+
 #endif /* CONFIG_USE_X86_SEG_SUPPORT */

 #define raw_cpu_add_1(pcp, val) percpu_add_op(1, , (pcp), val)
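
And to spell out the const_##pcp pasting, a minimal stand-alone demo
(not kernel code; types reduced to ints, and const_pcpu_hot here is a
plain const object standing in for the alias):

#define this_cpu_read_stable(pcp)	const_##pcp

struct hot { int current_task; };

struct hot pcpu_hot = { 42 };
const struct hot const_pcpu_hot = { 42 };	/* stand-in for the alias */

int read_current(void)
{
	/*
	 * "const_" is pasted onto the leading token of the argument,
	 * so this expands to const_pcpu_hot.current_task.
	 */
	return this_cpu_read_stable(pcpu_hot.current_task);
}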