[RFC PATCH 51/86] preempt: disallow !PREEMPT_COUNT or !PREEMPTION

From: Ankur Arora
Date: Tue Nov 07 2023 - 17:05:42 EST


PREEMPT_COUNT and PREEMPTION are selected for all preemption models, so
mark configurations that lack either as invalid with a compile-time
error.

Also stub out cond_resched() since we don't actually need it for
anything anymore.

Signed-off-by: Ankur Arora <ankur.a.arora@xxxxxxxxxx>
---
include/linux/kernel.h | 11 ++---------
include/linux/preempt.h | 42 +++--------------------------------------
include/linux/sched.h | 4 +---
3 files changed, 6 insertions(+), 51 deletions(-)

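[Note, not part of the patch: a standalone sketch of the new guard
added to include/linux/preempt.h in the hunk below. The CONFIG_*
macros here are stand-ins for the Kconfig-generated defines, so this
builds with any C compiler.]

/*
 * guard.c: sketch of the compile-time check.
 * Build with: cc -DCONFIG_PREEMPTION -DCONFIG_PREEMPT_COUNT guard.c
 * Dropping either -D trips the #error, mirroring how an invalid
 * kernel configuration now fails loudly at build time instead of
 * silently compiling into an unsupported kernel.
 */
#if !defined(CONFIG_PREEMPTION) || !defined(CONFIG_PREEMPT_COUNT)
#error "Configurations with !CONFIG_PREEMPTION or !CONFIG_PREEMPT_COUNT are not supported."
#endif

int main(void) { return 0; }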
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index cf077cd69643..a48900d8b409 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -95,13 +95,6 @@
struct completion;
struct user;

-#ifdef CONFIG_PREEMPT_VOLUNTARY
-extern int _cond_resched(void);
-# define might_resched() _cond_resched()
-#else
-# define might_resched() do { } while (0)
-#endif
-
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
extern void __might_resched(const char *file, int line, unsigned int offsets);
extern void __might_sleep(const char *file, int line);
@@ -121,7 +114,7 @@ extern void __cant_migrate(const char *file, int line);
* supposed to.
*/
# define might_sleep() \
- do { __might_sleep(__FILE__, __LINE__); might_resched(); } while (0)
+ do { __might_sleep(__FILE__, __LINE__); } while (0)
/**
* cant_sleep - annotation for functions that cannot sleep
*
@@ -163,7 +156,7 @@ extern void __cant_migrate(const char *file, int line);
static inline void __might_resched(const char *file, int line,
unsigned int offsets) { }
static inline void __might_sleep(const char *file, int line) { }
-# define might_sleep() do { might_resched(); } while (0)
+# define might_sleep() do { } while (0)
# define cant_sleep() do { } while (0)
# define cant_migrate() do { } while (0)
# define sched_annotate_sleep() do { } while (0)
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index 0abc6a673c41..dc5125b9c36b 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -197,7 +197,9 @@ extern void preempt_count_sub(int val);
#define preempt_count_inc() preempt_count_add(1)
#define preempt_count_dec() preempt_count_sub(1)

-#ifdef CONFIG_PREEMPT_COUNT
+#if !defined(CONFIG_PREEMPTION) || !defined(CONFIG_PREEMPT_COUNT)
+#error "Configurations with !CONFIG_PREEMPTION or !CONFIG_PREEMPT_COUNT are not supported."
+#endif

#define preempt_disable() \
do { \
@@ -215,7 +217,6 @@ do { \

#define preemptible() (preempt_count() == 0 && !irqs_disabled())

-#ifdef CONFIG_PREEMPTION
#define preempt_enable() \
do { \
barrier(); \
@@ -236,22 +237,6 @@ do { \
__preempt_schedule(); \
} while (0)

-#else /* !CONFIG_PREEMPTION */
-#define preempt_enable() \
-do { \
- barrier(); \
- preempt_count_dec(); \
-} while (0)
-
-#define preempt_enable_notrace() \
-do { \
- barrier(); \
- __preempt_count_dec(); \
-} while (0)
-
-#define preempt_check_resched() do { } while (0)
-#endif /* CONFIG_PREEMPTION */
-
#define preempt_disable_notrace() \
do { \
__preempt_count_inc(); \
@@ -264,27 +249,6 @@ do { \
__preempt_count_dec(); \
} while (0)

-#else /* !CONFIG_PREEMPT_COUNT */
-
-/*
- * Even if we don't have any preemption, we need preempt disable/enable
- * to be barriers, so that we don't have things like get_user/put_user
- * that can cause faults and scheduling migrate into our preempt-protected
- * region.
- */
-#define preempt_disable() barrier()
-#define sched_preempt_enable_no_resched() barrier()
-#define preempt_enable_no_resched() barrier()
-#define preempt_enable() barrier()
-#define preempt_check_resched() do { } while (0)
-
-#define preempt_disable_notrace() barrier()
-#define preempt_enable_no_resched_notrace() barrier()
-#define preempt_enable_notrace() barrier()
-#define preemptible() 0
-
-#endif /* CONFIG_PREEMPT_COUNT */
-
#ifdef MODULE
/*
* Modules have no business playing preemption tricks.
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 6dd206b2ef50..4dabd9530f98 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2088,9 +2088,7 @@ static inline bool test_tsk_need_resched_any(struct task_struct *tsk)
* value indicates whether a reschedule was done in fact.
* cond_resched_lock() will drop the spinlock before scheduling,
*/
-#ifndef CONFIG_PREEMPTION
-extern int _cond_resched(void);
-#else
+#ifdef CONFIG_PREEMPTION
static inline int _cond_resched(void) { return 0; }
#endif

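[Note, not part of the patch: a simplified userspace model of what
the sched.h hunk above leaves behind. The cond_resched() mapping
below is hypothetical shorthand; the in-kernel wrapper additionally
carries debug annotations.]

/*
 * model.c: with CONFIG_PREEMPTION now required, _cond_resched() is
 * always the inline stub, so explicit reschedule points become
 * no-ops that report "no reschedule was done" (0).
 */
#define CONFIG_PREEMPTION 1	/* stand-in for the Kconfig define */

#ifdef CONFIG_PREEMPTION
static inline int _cond_resched(void) { return 0; }
#endif

#define cond_resched() _cond_resched()	/* simplified wrapper */

int main(void)
{
	int rescheduled = 0;
	long i;

	for (i = 0; i < 1000000; i++)
		rescheduled |= cond_resched();	/* always 0: stubbed out */

	return rescheduled;			/* exits 0 */
}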
--
2.31.1