[PATCH v4 4/4] KVM: selftests: Run dirty_log_perf_test on specific CPUs

From: Vipin Sharma
Date: Thu Oct 06 2022 - 13:12:21 EST


Add a command line option, -c, to pin the vCPUs, and optionally the
main application thread, to specific physical CPUs on the host machine.
This is useful for analyzing performance based on where the vCPUs and
the dirty log worker run, e.g. on different NUMA nodes versus on the
same NUMA node.
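
For example, with 3 vCPUs:

  ./dirty_log_perf_test -v 3 -c 22,23,24,50

pins vCPU 0, 1 and 2 to physical CPUs 22, 23 and 24 respectively, and
pins the main application thread to physical CPU 50. Passing only
"-c 22,23,24" pins the vCPUs the same way but leaves the main thread
free to run on any physical CPU.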

Link: https://lore.kernel.org/lkml/20220801151928.270380-1-vipinsh@xxxxxxxxxx
Signed-off-by: Vipin Sharma <vipinsh@xxxxxxxxxx>
Suggested-by: David Matlack <dmatlack@xxxxxxxxxx>
Suggested-by: Sean Christopherson <seanjc@xxxxxxxxxx>
Suggested-by: Paolo Bonzini <pbonzini@xxxxxxxxxx>
---
.../selftests/kvm/dirty_log_perf_test.c | 23 +++++++-
.../selftests/kvm/include/perf_test_util.h | 6 ++
.../selftests/kvm/lib/perf_test_util.c | 58 ++++++++++++++++++-
3 files changed, 84 insertions(+), 3 deletions(-)
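
A note for reviewers unfamiliar with the affinity API used here: the
pinning boils down to building a cpu_set_t with CPU_ZERO()/CPU_SET()
and calling sched_setaffinity() on the calling thread. A minimal
standalone sketch of that mechanism (not part of the patch; the program
and its argument handling are made up purely for illustration):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char *argv[])
{
	cpu_set_t cpuset;
	int pcpu;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <pcpu>\n", argv[0]);
		return 1;
	}
	pcpu = atoi(argv[1]);

	/* Pin the calling thread (pid 0 means "self") to pcpu. */
	CPU_ZERO(&cpuset);
	CPU_SET(pcpu, &cpuset);
	if (sched_setaffinity(0, sizeof(cpuset), &cpuset)) {
		perror("sched_setaffinity");
		return 1;
	}

	/* sched_getcpu() should now report the pinned pcpu. */
	printf("running on pCPU %d\n", sched_getcpu());
	return 0;
}

Since sched_setaffinity(0, ...) only affects the calling thread, each
vCPU thread pins itself from vcpu_thread_main(), and the main
application thread is pinned directly from perf_test_setup_pinning().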

diff --git a/tools/testing/selftests/kvm/dirty_log_perf_test.c b/tools/testing/selftests/kvm/dirty_log_perf_test.c
index ecda802b78ff..33f83e423f75 100644
--- a/tools/testing/selftests/kvm/dirty_log_perf_test.c
+++ b/tools/testing/selftests/kvm/dirty_log_perf_test.c
@@ -353,7 +353,7 @@ static void help(char *name)
puts("");
printf("usage: %s [-h] [-i iterations] [-p offset] [-g] "
"[-m mode] [-n] [-b vcpu bytes] [-v vcpus] [-o] [-s mem type]"
- "[-x memslots]\n", name);
+ "[-x memslots] [-c physical cpus to run test on]\n", name);
puts("");
printf(" -i: specify iteration counts (default: %"PRIu64")\n",
TEST_HOST_LOOP_N);
@@ -383,6 +383,18 @@ static void help(char *name)
backing_src_help("-s");
printf(" -x: Split the memory region into this number of memslots.\n"
" (default: 1)\n");
+ printf(" -c: Comma separated values of the physical CPUs, which will run\n"
+ " the vCPUs, optionally, followed by the main application thread cpu.\n"
+ " Number of values must be at least the number of vCPUs.\n"
+ " The very next number is used to pin main application thread.\n\n"
+ " Example: ./dirty_log_perf_test -v 3 -c 22,23,24,50\n"
+ " This means that the vcpu 0 will run on the physical cpu 22,\n"
+ " vcpu 1 on the physical cpu 23, vcpu 2 on the physical cpu 24\n"
+ " and the main thread will run on cpu 50.\n\n"
+ " Example: ./dirty_log_perf_test -v 3 -c 22,23,24\n"
+ " Same as the previous example except now main application\n"
+ " thread can run on any physical cpu\n\n"
+ " (default: No cpu mapping)\n");
puts("");
exit(0);
}
@@ -398,6 +410,7 @@ int main(int argc, char *argv[])
.slots = 1,
};
int opt;
+ const char *pcpu_list = NULL;

dirty_log_manual_caps =
kvm_check_cap(KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2);
@@ -406,11 +419,14 @@ int main(int argc, char *argv[])

guest_modes_append_default();

- while ((opt = getopt(argc, argv, "b:ef:ghi:m:nop:s:v:x:")) != -1) {
+ while ((opt = getopt(argc, argv, "b:c:ef:ghi:m:nop:s:v:x:")) != -1) {
switch (opt) {
case 'b':
guest_percpu_mem_size = parse_size(optarg);
break;
+ case 'c':
+ pcpu_list = optarg;
+ break;
case 'e':
/* 'e' is for evil. */
run_vcpus_while_disabling_dirty_logging = true;
@@ -458,6 +474,9 @@ int main(int argc, char *argv[])
}
}

+ if (pcpu_list)
+ perf_test_setup_pinning(pcpu_list, nr_vcpus);
+
TEST_ASSERT(p.iterations >= 2, "The test should have at least two iterations");

pr_info("Test iterations: %"PRIu64"\n", p.iterations);
diff --git a/tools/testing/selftests/kvm/include/perf_test_util.h b/tools/testing/selftests/kvm/include/perf_test_util.h
index eaa88df0555a..8197260e3b6b 100644
--- a/tools/testing/selftests/kvm/include/perf_test_util.h
+++ b/tools/testing/selftests/kvm/include/perf_test_util.h
@@ -27,6 +27,8 @@ struct perf_test_vcpu_args {
/* Only used by the host userspace part of the vCPU thread */
struct kvm_vcpu *vcpu;
int vcpu_idx;
+ /* The pCPU to which this vCPU is pinned. Only valid if pin_vcpus is true. */
+ int pcpu;
};

struct perf_test_args {
@@ -39,6 +41,8 @@ struct perf_test_args {

/* Run vCPUs in L2 instead of L1, if the architecture supports it. */
bool nested;
+ /* True if all vCPUs are pinned to pCPUs. */
+ bool pin_vcpus;

struct perf_test_vcpu_args vcpu_args[KVM_MAX_VCPUS];
};
@@ -60,4 +64,6 @@ void perf_test_guest_code(uint32_t vcpu_id);
uint64_t perf_test_nested_pages(int nr_vcpus);
void perf_test_setup_nested(struct kvm_vm *vm, int nr_vcpus, struct kvm_vcpu *vcpus[]);

+void perf_test_setup_pinning(const char *pcpus_string, int nr_vcpus);
+
#endif /* SELFTEST_KVM_PERF_TEST_UTIL_H */
diff --git a/tools/testing/selftests/kvm/lib/perf_test_util.c b/tools/testing/selftests/kvm/lib/perf_test_util.c
index 9618b37c66f7..5d1aca0482b4 100644
--- a/tools/testing/selftests/kvm/lib/perf_test_util.c
+++ b/tools/testing/selftests/kvm/lib/perf_test_util.c
@@ -2,7 +2,10 @@
/*
* Copyright (C) 2020, Google LLC.
*/
+#define _GNU_SOURCE
+
#include <inttypes.h>
+#include <sched.h>

#include "kvm_util.h"
#include "perf_test_util.h"
@@ -240,9 +243,25 @@ void __weak perf_test_setup_nested(struct kvm_vm *vm, int nr_vcpus, struct kvm_v
exit(KSFT_SKIP);
}

+static void pin_me_to_pcpu(int pcpu)
+{
+ cpu_set_t cpuset;
+ int err;
+
+ CPU_ZERO(&cpuset);
+ CPU_SET(pcpu, &cpuset);
+ err = sched_setaffinity(0, sizeof(cpu_set_t), &cpuset);
+ TEST_ASSERT(err == 0, "sched_setaffinity() failed for pcpu: %d\n", pcpu);
+}
+
static void *vcpu_thread_main(void *data)
{
struct vcpu_thread *vcpu = data;
+ int idx = vcpu->vcpu_idx;
+ struct perf_test_vcpu_args *vcpu_args = &perf_test_args.vcpu_args[idx];
+
+ if (perf_test_args.pin_vcpus)
+ pin_me_to_pcpu(vcpu_args->pcpu);

WRITE_ONCE(vcpu->running, true);

@@ -255,7 +274,7 @@ static void *vcpu_thread_main(void *data)
while (!READ_ONCE(all_vcpu_threads_running))
;

- vcpu_thread_fn(&perf_test_args.vcpu_args[vcpu->vcpu_idx]);
+ vcpu_thread_fn(vcpu_args);

return NULL;
}
@@ -292,3 +311,40 @@ void perf_test_join_vcpu_threads(int nr_vcpus)
for (i = 0; i < nr_vcpus; i++)
pthread_join(vcpu_threads[i].thread, NULL);
}
+
+static int pcpu_num(const char *cpu_str)
+{
+ int pcpu = atoi_paranoid(cpu_str);
+ TEST_ASSERT(pcpu >= 0, "Invalid cpu number: %d\n", pcpu);
+ return pcpu;
+}
+
+void perf_test_setup_pinning(const char *pcpus_string, int nr_vcpus)
+{
+ char delim[2] = ",";
+ char *cpu, *cpu_list;
+ int i = 0;
+
+ cpu_list = strdup(pcpus_string);
+ TEST_ASSERT(cpu_list, "strdup() allocation failed.\n");
+
+ cpu = strtok(cpu_list, delim);
+
+ /* 1. Get all pcpus for vcpus. */
+ while (cpu && i < nr_vcpus) {
+ perf_test_args.vcpu_args[i++].pcpu = pcpu_num(cpu);
+ cpu = strtok(NULL, delim);
+ }
+
+ TEST_ASSERT(i == nr_vcpus,
+ "Number of pcpus (%d) not sufficient for the number of vcpus (%d).",
+ i, nr_vcpus);
+
+ perf_test_args.pin_vcpus = true;
+
+ /* 2. Check if a pcpu was provided for the main application thread. */
+ if (cpu)
+ pin_me_to_pcpu(pcpu_num(cpu));
+
+ free(cpu_list);
+}
--
2.38.0.rc1.362.ged0d419d3c-goog