[PATCH 2/2] context_tracking, rcu: Rename RCU_DYNTICKS_IDX to CT_DYNTICKS_IDX

From: Valentin Schneider
Date: Wed Mar 27 2024 - 07:29:55 EST


The symbols relating to the CT_STATE part of context_tracking.state are now
all prefixed with CT_STATE.

The RCU dynticks counter part of that atomic variable still involves
symbols with different prefixes; align them all on the CT_DYNTICKS
prefix that CT_DYNTICKS_MASK already uses.
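
For reference, the bit layout that this rename makes uniform can be
sketched with a small standalone program (an illustration only, not
kernel code; the enum values below follow
include/linux/context_tracking_state.h as of this series, with
CT_STATE_DISABLED omitted for brevity):

  #include <stdio.h>

  /* Simplified stand-ins for the kernel's enum ctx_state values. */
  enum ctx_state {
          CT_STATE_KERNEL = 0,
          CT_STATE_IDLE   = 1,
          CT_STATE_USER   = 2,
          CT_STATE_GUEST  = 3,
          CT_STATE_MAX    = 4,
  };

  /* Even value for idle, else odd. */
  #define CT_DYNTICKS_IDX  CT_STATE_MAX       /* LSB of the dynticks counter */
  #define CT_STATE_MASK    (CT_STATE_MAX - 1) /* low bits: CT_STATE_* value  */
  #define CT_DYNTICKS_MASK (~CT_STATE_MASK)   /* high bits: dynticks counter */

  int main(void)
  {
          /* Combined value: dynticks counter at 3, state CT_STATE_USER. */
          int state = 3 * CT_DYNTICKS_IDX + CT_STATE_USER;

          /* Each half is recovered independently, which is what lets a
           * single atomic variable serve the context tracker and RCU. */
          printf("ctx state: %d\n", state & CT_STATE_MASK);                        /* 2 */
          printf("dynticks:  %d\n", (state & CT_DYNTICKS_MASK) / CT_DYNTICKS_IDX); /* 3 */
          return 0;
  }

ct_state_inc(CT_DYNTICKS_IDX) thus advances the counter without
disturbing the state bits, and 2 * CT_DYNTICKS_IDX (as in
rcu_momentary_dyntick_idle()) advances it by two while preserving its
parity.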

Signed-off-by: Valentin Schneider <vschneid@xxxxxxxxxx>
---
 include/linux/context_tracking.h       |  6 +++---
 include/linux/context_tracking_state.h |  2 +-
 kernel/context_tracking.c              | 22 +++++++++++-----------
 kernel/rcu/tree.c                      | 12 ++++++------
 4 files changed, 21 insertions(+), 21 deletions(-)

diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h
index 28fcfa1849032..a9c3f2c0e04e3 100644
--- a/include/linux/context_tracking.h
+++ b/include/linux/context_tracking.h
@@ -119,7 +119,7 @@ extern void ct_idle_exit(void);
  */
 static __always_inline bool rcu_dynticks_curr_cpu_in_eqs(void)
 {
-	return !(raw_atomic_read(this_cpu_ptr(&context_tracking.state)) & RCU_DYNTICKS_IDX);
+	return !(raw_atomic_read(this_cpu_ptr(&context_tracking.state)) & CT_DYNTICKS_IDX);
 }
 
 /*
@@ -142,7 +142,7 @@ static __always_inline bool warn_rcu_enter(void)
 	preempt_disable_notrace();
 	if (rcu_dynticks_curr_cpu_in_eqs()) {
 		ret = true;
-		ct_state_inc(RCU_DYNTICKS_IDX);
+		ct_state_inc(CT_DYNTICKS_IDX);
 	}
 
 	return ret;
@@ -151,7 +151,7 @@ static __always_inline bool warn_rcu_enter(void)
 static __always_inline void warn_rcu_exit(bool rcu)
 {
 	if (rcu)
-		ct_state_inc(RCU_DYNTICKS_IDX);
+		ct_state_inc(CT_DYNTICKS_IDX);
 	preempt_enable_notrace();
 }

diff --git a/include/linux/context_tracking_state.h b/include/linux/context_tracking_state.h
index f1c53125edee2..73af5a13b4fc5 100644
--- a/include/linux/context_tracking_state.h
+++ b/include/linux/context_tracking_state.h
@@ -19,7 +19,7 @@ enum ctx_state {
 };
 
 /* Even value for idle, else odd. */
-#define RCU_DYNTICKS_IDX CT_STATE_MAX
+#define CT_DYNTICKS_IDX CT_STATE_MAX
 
 #define CT_STATE_MASK (CT_STATE_MAX - 1)
 #define CT_DYNTICKS_MASK (~CT_STATE_MASK)
diff --git a/kernel/context_tracking.c b/kernel/context_tracking.c
index 21f2cf90ed0bf..586c17fef1327 100644
--- a/kernel/context_tracking.c
+++ b/kernel/context_tracking.c
@@ -31,7 +31,7 @@ DEFINE_PER_CPU(struct context_tracking, context_tracking) = {
 	.dynticks_nesting = 1,
 	.dynticks_nmi_nesting = DYNTICK_IRQ_NONIDLE,
 #endif
-	.state = ATOMIC_INIT(RCU_DYNTICKS_IDX),
+	.state = ATOMIC_INIT(CT_DYNTICKS_IDX),
 };
 EXPORT_SYMBOL_GPL(context_tracking);
 
@@ -90,7 +90,7 @@ static noinstr void ct_kernel_exit_state(int offset)
 	rcu_dynticks_task_trace_enter(); // Before ->dynticks update!
 	seq = ct_state_inc(offset);
 	// RCU is no longer watching. Better be in extended quiescent state!
-	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && (seq & RCU_DYNTICKS_IDX));
+	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && (seq & CT_DYNTICKS_IDX));
 }
 
 /*
@@ -110,7 +110,7 @@ static noinstr void ct_kernel_enter_state(int offset)
 	seq = ct_state_inc(offset);
 	// RCU is now watching. Better not be in an extended quiescent state!
 	rcu_dynticks_task_trace_exit(); // After ->dynticks update!
-	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !(seq & RCU_DYNTICKS_IDX));
+	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !(seq & CT_DYNTICKS_IDX));
 }
 
 /*
@@ -236,7 +236,7 @@ void noinstr ct_nmi_exit(void)
 	instrumentation_end();
 
 	// RCU is watching here ...
-	ct_kernel_exit_state(RCU_DYNTICKS_IDX);
+	ct_kernel_exit_state(CT_DYNTICKS_IDX);
 	// ... but is no longer watching here.
 
 	if (!in_nmi())
@@ -277,7 +277,7 @@ void noinstr ct_nmi_enter(void)
 	rcu_dynticks_task_exit();
 
 	// RCU is not watching here ...
-	ct_kernel_enter_state(RCU_DYNTICKS_IDX);
+	ct_kernel_enter_state(CT_DYNTICKS_IDX);
 	// ... but is watching here.
 
 	instrumentation_begin();
@@ -317,7 +317,7 @@ void noinstr ct_nmi_enter(void)
 void noinstr ct_idle_enter(void)
 {
 	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !raw_irqs_disabled());
-	ct_kernel_exit(false, RCU_DYNTICKS_IDX + CT_STATE_IDLE);
+	ct_kernel_exit(false, CT_DYNTICKS_IDX + CT_STATE_IDLE);
 }
 EXPORT_SYMBOL_GPL(ct_idle_enter);
 
@@ -335,7 +335,7 @@ void noinstr ct_idle_exit(void)
 	unsigned long flags;
 
 	raw_local_irq_save(flags);
-	ct_kernel_enter(false, RCU_DYNTICKS_IDX - CT_STATE_IDLE);
+	ct_kernel_enter(false, CT_DYNTICKS_IDX - CT_STATE_IDLE);
 	raw_local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(ct_idle_exit);
@@ -504,7 +504,7 @@ void noinstr __ct_user_enter(enum ctx_state state)
 			 * CPU doesn't need to maintain the tick for RCU maintenance purposes
 			 * when the CPU runs in userspace.
 			 */
-			ct_kernel_exit(true, RCU_DYNTICKS_IDX + state);
+			ct_kernel_exit(true, CT_DYNTICKS_IDX + state);
 
 			/*
 			 * Special case if we only track user <-> kernel transitions for tickless
@@ -534,7 +534,7 @@ void noinstr __ct_user_enter(enum ctx_state state)
 				/*
 				 * Tracking for vtime and RCU EQS. Make sure we don't race
 				 * with NMIs. OTOH we don't care about ordering here since
-				 * RCU only requires RCU_DYNTICKS_IDX increments to be fully
+				 * RCU only requires CT_DYNTICKS_IDX increments to be fully
 				 * ordered.
 				 */
 				raw_atomic_add(state, &ct->state);
@@ -620,7 +620,7 @@ void noinstr __ct_user_exit(enum ctx_state state)
 			 * Exit RCU idle mode while entering the kernel because it can
 			 * run a RCU read side critical section anytime.
 			 */
-			ct_kernel_enter(true, RCU_DYNTICKS_IDX - state);
+			ct_kernel_enter(true, CT_DYNTICKS_IDX - state);
 			if (state == CT_STATE_USER) {
 				instrumentation_begin();
 				vtime_user_exit(current);
@@ -644,7 +644,7 @@ void noinstr __ct_user_exit(enum ctx_state state)
 				/*
 				 * Tracking for vtime and RCU EQS. Make sure we don't race
 				 * with NMIs. OTOH we don't care about ordering here since
-				 * RCU only requires RCU_DYNTICKS_IDX increments to be fully
+				 * RCU only requires CT_DYNTICKS_IDX increments to be fully
 				 * ordered.
 				 */
 				raw_atomic_sub(state, &ct->state);
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index d9642dd06c253..3dbdcd12b876e 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -259,9 +259,9 @@ void rcu_softirq_qs(void)
  */
 static void rcu_dynticks_eqs_online(void)
 {
-	if (ct_dynticks() & RCU_DYNTICKS_IDX)
+	if (ct_dynticks() & CT_DYNTICKS_IDX)
 		return;
-	ct_state_inc(RCU_DYNTICKS_IDX);
+	ct_state_inc(CT_DYNTICKS_IDX);
 }
 
 /*
@@ -280,7 +280,7 @@ static int rcu_dynticks_snap(int cpu)
  */
 static bool rcu_dynticks_in_eqs(int snap)
 {
-	return !(snap & RCU_DYNTICKS_IDX);
+	return !(snap & CT_DYNTICKS_IDX);
 }
 
 /*
@@ -302,7 +302,7 @@ bool rcu_dynticks_zero_in_eqs(int cpu, int *vp)
 	int snap;
 
 	// If not quiescent, force back to earlier extended quiescent state.
-	snap = ct_dynticks_cpu(cpu) & ~RCU_DYNTICKS_IDX;
+	snap = ct_dynticks_cpu(cpu) & ~CT_DYNTICKS_IDX;
 	smp_rmb(); // Order ->dynticks and *vp reads.
 	if (READ_ONCE(*vp))
 		return false; // Non-zero, so report failure;
@@ -328,9 +328,9 @@ notrace void rcu_momentary_dyntick_idle(void)
 	int seq;
 
 	raw_cpu_write(rcu_data.rcu_need_heavy_qs, false);
-	seq = ct_state_inc(2 * RCU_DYNTICKS_IDX);
+	seq = ct_state_inc(2 * CT_DYNTICKS_IDX);
 	/* It is illegal to call this from idle state. */
-	WARN_ON_ONCE(!(seq & RCU_DYNTICKS_IDX));
+	WARN_ON_ONCE(!(seq & CT_DYNTICKS_IDX));
 	rcu_preempt_deferred_qs(current);
 }
 EXPORT_SYMBOL_GPL(rcu_momentary_dyntick_idle);
--
2.43.0