[V2 PATCH 4/6] KVM: selftests: x86: Add helpers to execute VMs with private memory

From: Vishal Annapurve
Date: Mon Dec 05 2022 - 18:24:18 EST


Introduce a set of APIs to execute a VM with private memslots.

Host userspace APIs for:
1) Executing a vcpu run loop that handles the MAPGPA (KVM_HC_MAP_GPA_RANGE) hypercall
2) Backing/unbacking guest private memory

Guest APIs for:
1) Changing the memory mapping type between shared and private (a rough usage
   sketch of these helpers follows below)
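
For illustration only, a minimal sketch of how a test could combine the guest
and host side helpers. The names guest_code(), main() and EXAMPLE_TEST_GPA are
hypothetical, and the private (restricted memfd backed) memslot setup covering
the example GPA is assumed to be done with helpers introduced elsewhere in this
series; vm_create_with_one_vcpu(), GUEST_DONE() and kvm_vm_free() are existing
selftest APIs:

  #include <kvm_util.h>
  #include <processor.h>
  #include <private_mem.h>

  /* Arbitrary example GPA; a private memslot is assumed to cover this range. */
  #define EXAMPLE_TEST_GPA	0xc0000000

  static void guest_code(void)
  {
  	/* Ask the host to convert the page to shared (decrypted)... */
  	kvm_hypercall_map_shared(EXAMPLE_TEST_GPA, PAGE_SIZE);
  	/* ...and then back to private (encrypted). */
  	kvm_hypercall_map_private(EXAMPLE_TEST_GPA, PAGE_SIZE);
  	GUEST_DONE();
  }

  int main(int argc, char *argv[])
  {
  	struct kvm_vcpu *vcpu;
  	struct kvm_vm *vm;

  	vm = vm_create_with_one_vcpu(&vcpu, guest_code);
  	/*
  	 * Private memslot setup for EXAMPLE_TEST_GPA is omitted here; it is
  	 * handled by helpers added elsewhere in this series.
  	 */

  	/*
  	 * Run the vcpu, servicing KVM_HC_MAP_GPA_RANGE exits by
  	 * backing/unbacking the restricted memfd, until the guest exits for
  	 * any other reason (here, the GUEST_DONE() ucall).
  	 */
  	vcpu_run_and_handle_mapgpa(vm, vcpu);

  	kvm_vm_free(vm);
  	return 0;
  }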

Signed-off-by: Vishal Annapurve <vannapurve@xxxxxxxxxx>
---
tools/testing/selftests/kvm/Makefile | 1 +
.../kvm/include/x86_64/private_mem.h | 24 +++
.../selftests/kvm/lib/x86_64/private_mem.c | 139 ++++++++++++++++++
3 files changed, 164 insertions(+)
create mode 100644 tools/testing/selftests/kvm/include/x86_64/private_mem.h
create mode 100644 tools/testing/selftests/kvm/lib/x86_64/private_mem.c

diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index 2275ba861e0e..97f7d52c553b 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -55,6 +55,7 @@ LIBKVM_x86_64 += lib/x86_64/apic.c
LIBKVM_x86_64 += lib/x86_64/handlers.S
LIBKVM_x86_64 += lib/x86_64/hyperv.c
LIBKVM_x86_64 += lib/x86_64/memstress.c
+LIBKVM_x86_64 += lib/x86_64/private_mem.c
LIBKVM_x86_64 += lib/x86_64/processor.c
LIBKVM_x86_64 += lib/x86_64/svm.c
LIBKVM_x86_64 += lib/x86_64/ucall.c
diff --git a/tools/testing/selftests/kvm/include/x86_64/private_mem.h b/tools/testing/selftests/kvm/include/x86_64/private_mem.h
new file mode 100644
index 000000000000..3aa6b4d11b28
--- /dev/null
+++ b/tools/testing/selftests/kvm/include/x86_64/private_mem.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2022, Google LLC.
+ */
+
+#ifndef SELFTEST_KVM_PRIVATE_MEM_H
+#define SELFTEST_KVM_PRIVATE_MEM_H
+
+#include <stdint.h>
+#include <kvm_util.h>
+
+void kvm_hypercall_map_shared(uint64_t gpa, uint64_t size);
+void kvm_hypercall_map_private(uint64_t gpa, uint64_t size);
+
+void vm_unback_private_mem(struct kvm_vm *vm, uint64_t gpa, uint64_t size);
+
+void vm_allocate_private_mem(struct kvm_vm *vm, uint64_t gpa, uint64_t size);
+
+void handle_vm_exit_map_gpa_hypercall(struct kvm_vm *vm, uint64_t gpa,
+ uint64_t npages, uint64_t attrs);
+
+void vcpu_run_and_handle_mapgpa(struct kvm_vm *vm, struct kvm_vcpu *vcpu);
+
+#endif /* SELFTEST_KVM_PRIVATE_MEM_H */
diff --git a/tools/testing/selftests/kvm/lib/x86_64/private_mem.c b/tools/testing/selftests/kvm/lib/x86_64/private_mem.c
new file mode 100644
index 000000000000..2b97fc34ec4a
--- /dev/null
+++ b/tools/testing/selftests/kvm/lib/x86_64/private_mem.c
@@ -0,0 +1,139 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022, Google LLC.
+ */
+#define _GNU_SOURCE /* for program_invocation_name */
+#include <fcntl.h>
+#include <limits.h>
+#include <sched.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/ioctl.h>
+
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <linux/kvm_para.h>
+
+#include <test_util.h>
+#include <kvm_util.h>
+#include <private_mem.h>
+#include <processor.h>
+
+static inline uint64_t __kvm_hypercall_map_gpa_range(uint64_t gpa, uint64_t size,
+ uint64_t flags)
+{
+ return kvm_hypercall(KVM_HC_MAP_GPA_RANGE, gpa, size >> PAGE_SHIFT, flags, 0);
+}
+
+static inline void kvm_hypercall_map_gpa_range(uint64_t gpa, uint64_t size,
+ uint64_t flags)
+{
+ uint64_t ret;
+
+ GUEST_ASSERT_2(IS_PAGE_ALIGNED(gpa) && IS_PAGE_ALIGNED(size), gpa, size);
+
+ ret = __kvm_hypercall_map_gpa_range(gpa, size, flags);
+ GUEST_ASSERT_1(!ret, ret);
+}
+
+void kvm_hypercall_map_shared(uint64_t gpa, uint64_t size)
+{
+ kvm_hypercall_map_gpa_range(gpa, size, KVM_MAP_GPA_RANGE_DECRYPTED);
+}
+
+void kvm_hypercall_map_private(uint64_t gpa, uint64_t size)
+{
+ kvm_hypercall_map_gpa_range(gpa, size, KVM_MAP_GPA_RANGE_ENCRYPTED);
+}
+
+static void vm_update_private_mem(struct kvm_vm *vm, uint64_t gpa, uint64_t size,
+ bool unback_mem)
+{
+ int restricted_fd;
+ uint64_t restricted_fd_offset, guest_phys_base, fd_offset;
+ struct kvm_memory_attributes attr;
+ struct kvm_userspace_memory_region_ext *region_ext;
+ struct kvm_userspace_memory_region *region;
+ int fallocate_mode = 0;
+ int ret;
+
+ region_ext = kvm_userspace_memory_region_ext_find(vm, gpa, gpa + size);
+ TEST_ASSERT(region_ext != NULL, "Region not found");
+ region = &region_ext->region;
+ TEST_ASSERT(region->flags & KVM_MEM_PRIVATE,
+ "Cannot update private memfd for non-private memslot");
+ restricted_fd = region_ext->restricted_fd;
+ restricted_fd_offset = region_ext->restricted_offset;
+ guest_phys_base = region->guest_phys_addr;
+ fd_offset = restricted_fd_offset + (gpa - guest_phys_base);
+
+ if (unback_mem)
+ fallocate_mode = (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE);
+
+ printf("restricted_fd %d fallocate_mode 0x%x for offset 0x%lx size 0x%lx\n",
+ restricted_fd, fallocate_mode, fd_offset, size);
+ ret = fallocate(restricted_fd, fallocate_mode, fd_offset, size);
+ TEST_ASSERT(ret == 0, "fallocate failed");
+ attr.attributes = unback_mem ? 0 : KVM_MEMORY_ATTRIBUTE_PRIVATE;
+ attr.address = gpa;
+ attr.size = size;
+ attr.flags = 0;
+ if (unback_mem)
+ printf("undoing encryption for gpa 0x%lx size 0x%lx\n", gpa, size);
+ else
+ printf("doing encryption for gpa 0x%lx size 0x%lx\n", gpa, size);
+
+ vm_ioctl(vm, KVM_SET_MEMORY_ATTRIBUTES, &attr);
+}
+
+void vm_unback_private_mem(struct kvm_vm *vm, uint64_t gpa, uint64_t size)
+{
+ vm_update_private_mem(vm, gpa, size, true);
+}
+
+void vm_allocate_private_mem(struct kvm_vm *vm, uint64_t gpa, uint64_t size)
+{
+ vm_update_private_mem(vm, gpa, size, false);
+}
+
+void handle_vm_exit_map_gpa_hypercall(struct kvm_vm *vm, uint64_t gpa,
+ uint64_t npages, uint64_t attrs)
+{
+ uint64_t size;
+
+ size = npages << MIN_PAGE_SHIFT;
+ pr_info("Explicit conversion off 0x%lx size 0x%lx to %s\n", gpa, size,
+ (attrs & KVM_MAP_GPA_RANGE_ENCRYPTED) ? "private" : "shared");
+
+ if (attrs & KVM_MAP_GPA_RANGE_ENCRYPTED)
+ vm_allocate_private_mem(vm, gpa, size);
+ else
+ vm_unback_private_mem(vm, gpa, size);
+}
+
+void vcpu_run_and_handle_mapgpa(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
+{
+ /*
+ * Loop until the guest exits for any reason other than the
+ * KVM_HC_MAP_GPA_RANGE hypercall.
+ */
+
+ while (true) {
+ vcpu_run(vcpu);
+
+ if ((vcpu->run->exit_reason == KVM_EXIT_HYPERCALL) &&
+ (vcpu->run->hypercall.nr == KVM_HC_MAP_GPA_RANGE)) {
+ uint64_t gpa = vcpu->run->hypercall.args[0];
+ uint64_t npages = vcpu->run->hypercall.args[1];
+ uint64_t attrs = vcpu->run->hypercall.args[2];
+
+ handle_vm_exit_map_gpa_hypercall(vm, gpa, npages, attrs);
+ vcpu->run->hypercall.ret = 0;
+ continue;
+ }
+
+ return;
+ }
+}
--
2.39.0.rc0.267.gcb52ba06e7-goog