[RFC PATCH 26/86] Revert "preempt/dynamic: Support dynamic preempt with preempt= boot option"

From: Ankur Arora
Date: Tue Nov 07 2023 - 17:02:38 EST


This reverts commit 826bfeb37bb4302ee6042f330c4c0c757152bdb8.

Signed-off-by: Ankur Arora <ankur.a.arora@xxxxxxxxxx>
---
kernel/sched/core.c | 68 +--------------------------------------------
1 file changed, 1 insertion(+), 67 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index b8dacc7feb47..51df0b62f519 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6954,75 +6954,9 @@ DEFINE_STATIC_CALL(preempt_schedule_notrace, __preempt_schedule_notrace_func);
EXPORT_STATIC_CALL(preempt_schedule_notrace);
#endif

+
#endif /* CONFIG_PREEMPTION */

-#ifdef CONFIG_PREEMPT_DYNAMIC
-
-#include <linux/entry-common.h>
-
-/*
- * SC:cond_resched
- * SC:might_resched
- * SC:preempt_schedule
- * SC:preempt_schedule_notrace
- * SC:irqentry_exit_cond_resched
- *
- *
- * NONE:
- * cond_resched <- __cond_resched
- * might_resched <- RET0
- * preempt_schedule <- NOP
- * preempt_schedule_notrace <- NOP
- * irqentry_exit_cond_resched <- NOP
- *
- * VOLUNTARY:
- * cond_resched <- __cond_resched
- * might_resched <- __cond_resched
- * preempt_schedule <- NOP
- * preempt_schedule_notrace <- NOP
- * irqentry_exit_cond_resched <- NOP
- *
- * FULL:
- * cond_resched <- RET0
- * might_resched <- RET0
- * preempt_schedule <- preempt_schedule
- * preempt_schedule_notrace <- preempt_schedule_notrace
- * irqentry_exit_cond_resched <- irqentry_exit_cond_resched
- */
-static int __init setup_preempt_mode(char *str)
-{
- if (!strcmp(str, "none")) {
- static_call_update(cond_resched, __cond_resched);
- static_call_update(might_resched, (typeof(&__cond_resched)) __static_call_return0);
- static_call_update(preempt_schedule, (typeof(&preempt_schedule)) NULL);
- static_call_update(preempt_schedule_notrace, (typeof(&preempt_schedule_notrace)) NULL);
- static_call_update(irqentry_exit_cond_resched, (typeof(&irqentry_exit_cond_resched)) NULL);
- pr_info("Dynamic Preempt: %s\n", str);
- } else if (!strcmp(str, "voluntary")) {
- static_call_update(cond_resched, __cond_resched);
- static_call_update(might_resched, __cond_resched);
- static_call_update(preempt_schedule, (typeof(&preempt_schedule)) NULL);
- static_call_update(preempt_schedule_notrace, (typeof(&preempt_schedule_notrace)) NULL);
- static_call_update(irqentry_exit_cond_resched, (typeof(&irqentry_exit_cond_resched)) NULL);
- pr_info("Dynamic Preempt: %s\n", str);
- } else if (!strcmp(str, "full")) {
- static_call_update(cond_resched, (typeof(&__cond_resched)) __static_call_return0);
- static_call_update(might_resched, (typeof(&__cond_resched)) __static_call_return0);
- static_call_update(preempt_schedule, __preempt_schedule_func);
- static_call_update(preempt_schedule_notrace, __preempt_schedule_notrace_func);
- static_call_update(irqentry_exit_cond_resched, irqentry_exit_cond_resched);
- pr_info("Dynamic Preempt: %s\n", str);
- } else {
- pr_warn("Dynamic Preempt: Unsupported preempt mode %s, default to full\n", str);
- return 1;
- }
- return 0;
-}
-__setup("preempt=", setup_preempt_mode);
-
-#endif /* CONFIG_PREEMPT_DYNAMIC */
-
-
/*
* This is the entry point to schedule() from kernel preemption
* off of irq context.
--
2.31.1
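
For reference, the hunk above removes the boot-time dispatcher that mapped preempt=none/voluntary/full onto the scheduler's static calls. Below is a minimal, hedged user-space sketch of that dispatch pattern only, using plain function pointers as stand-ins for the kernel's static calls; every name in it (sched_ops, do_cond_resched, ret0) is invented for illustration and is not kernel API.

```c
/*
 * Illustrative sketch only -- NOT kernel code. It models the string-to-
 * callback dispatch that the reverted setup_preempt_mode() performed,
 * with plain function pointers standing in for static_call_update().
 */
#include <stdio.h>
#include <string.h>

static int do_cond_resched(void) { return 1; }  /* stand-in for __cond_resched */
static int ret0(void)            { return 0; }  /* stand-in for __static_call_return0 */

/* Stand-ins for two of the call targets touched by "preempt=". */
struct sched_ops {
	int (*cond_resched)(void);
	int (*might_resched)(void);
};

static struct sched_ops ops;

/* Parse "none", "voluntary" or "full" and rewire the callbacks,
 * mirroring the table in the removed comment block above. */
static int setup_preempt_mode(const char *str)
{
	if (!strcmp(str, "none")) {
		ops.cond_resched  = do_cond_resched;
		ops.might_resched = ret0;
	} else if (!strcmp(str, "voluntary")) {
		ops.cond_resched  = do_cond_resched;
		ops.might_resched = do_cond_resched;
	} else if (!strcmp(str, "full")) {
		ops.cond_resched  = ret0;
		ops.might_resched = ret0;
	} else {
		fprintf(stderr, "unsupported preempt mode %s\n", str);
		return 1;
	}
	printf("Dynamic Preempt: %s\n", str);
	return 0;
}

int main(void)
{
	setup_preempt_mode("voluntary");
	printf("cond_resched() -> %d, might_resched() -> %d\n",
	       ops.cond_resched(), ops.might_resched());
	return 0;
}
```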