[PATCH tip/core/rcu 11/24] rcutorture: Add rcuperf holdoff boot parameter to reduce interference

From: Paul E. McKenney
Date: Tue Apr 12 2016 - 11:49:53 EST


Boot-time activity can legitimately grab CPUs for extended time periods,
so this commit adds the rcuperf.holdoff boot parameter, which delays the
start of the performance test until boot has completed. This holdoff
defaults to 10 seconds.
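
For example, the holdoff could be raised to 30 seconds either on the
kernel command line or, when rcuperf is built as a module, at load time
(illustrative values, not part of this patch):

	rcuperf.holdoff=30		# kernel boot parameter
	modprobe rcuperf holdoff=30	# equivalent module parameter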

Signed-off-by: Paul E. McKenney <paulmck@xxxxxxxxxxxxxxxxxx>
---
Documentation/kernel-parameters.txt | 6 ++++++
kernel/rcu/rcuperf.c | 5 +++++
2 files changed, 11 insertions(+)

diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 951af481da5a..da9ee466789b 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -3288,6 +3288,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
Measure performance of expedited synchronous
grace-period primitives.

+ rcuperf.holdoff= [KNL]
+ Set test-start holdoff period in seconds. The purpose of
+ this parameter is to delay the start of the
+ test until boot completes in order to avoid
+ interference.
+
rcuperf.nreaders= [KNL]
Set number of RCU readers. The value -1 selects
N, where N is the number of CPUs. A value
diff --git a/kernel/rcu/rcuperf.c b/kernel/rcu/rcuperf.c
index 12561f96f0a2..278600143bb6 100644
--- a/kernel/rcu/rcuperf.c
+++ b/kernel/rcu/rcuperf.c
@@ -59,6 +59,7 @@ MODULE_AUTHOR("Paul E. McKenney <paulmck@xxxxxxxxxxxxxxxxxx>");
do { if (verbose) pr_alert("%s" PERF_FLAG "!!! %s\n", perf_type, s); } while (0)

torture_param(bool, gp_exp, true, "Use expedited GP wait primitives");
+torture_param(int, holdoff, 10, "Holdoff time before test start (s)");
torture_param(int, nreaders, -1, "Number of RCU reader threads");
torture_param(int, nwriters, -1, "Number of RCU updater threads");
torture_param(bool, shutdown, false, "Shutdown at end of performance tests.");
@@ -368,6 +369,10 @@ rcu_perf_writer(void *arg)
set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
sp.sched_priority = 1;
sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
+
+ if (holdoff)
+ schedule_timeout_uninterruptible(holdoff * HZ);
+
t = ktime_get_mono_fast_ns();
if (atomic_inc_return(&n_rcu_perf_writer_started) >= nrealwriters) {
t_rcu_perf_writer_started = t;
--
2.5.2
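
For readers unfamiliar with the torture_param() helper used above: it is
defined in include/linux/torture.h and expands roughly as sketched below
(a paraphrase, not the verbatim kernel definition), which is what makes
"holdoff" available both as a module parameter and as the
rcuperf.holdoff= boot parameter:

	/*
	 * Approximate expansion of:
	 *   torture_param(int, holdoff, 10, "Holdoff time before test start (s)");
	 * A static variable backed by a read-only module parameter, which
	 * the kernel also accepts on its command line as rcuperf.holdoff=.
	 */
	static int holdoff = 10;
	module_param(holdoff, int, 0444);
	MODULE_PARM_DESC(holdoff, "Holdoff time before test start (s)");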