[PATCH v3 11/13] KVM: selftests / xen: map shared_info using HVA rather than GFN

From: Paul Durrant
Date: Mon Sep 18 2023 - 12:09:23 EST


From: Paul Durrant <pdurrant@xxxxxxxxxx>

Using the HVA of the shared_info page is more efficient, so if the
capability (KVM_XEN_HVM_CONFIG_SHARED_INFO_HVA) is present, use that
method to do the mapping.

NOTE: Have the juggle_shinfo_state() thread map and unmap using both
GFN and HVA, to make sure the older GFN-based mechanism is not broken.

Signed-off-by: Paul Durrant <pdurrant@xxxxxxxxxx>
---
Cc: Sean Christopherson <seanjc@xxxxxxxxxx>
Cc: Paolo Bonzini <pbonzini@xxxxxxxxxx>
Cc: David Woodhouse <dwmw2@xxxxxxxxxxxxx>

v3:
- Re-work the juggle_shinfo_state() thread

v2:
- New in this version.
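
For reference, the capability check and the resulting choice of mapping
method made in main() reduce to the following sketch (not part of the
patch itself; all identifiers are the ones used in the diff below, and
vm/shinfo are assumed to have been set up beforehand by the usual
selftest helpers):

  int xen_caps = kvm_check_cap(KVM_CAP_XEN_HVM);
  struct kvm_xen_hvm_attr ha = {};

  if (xen_caps & KVM_XEN_HVM_CONFIG_SHARED_INFO_HVA) {
          /* Preferred: map shared_info by host virtual address. */
          ha.type = KVM_XEN_ATTR_TYPE_SHARED_INFO_HVA;
          ha.u.shared_info.hva = (unsigned long)shinfo;
  } else {
          /* Fall back to mapping by guest frame number. */
          ha.type = KVM_XEN_ATTR_TYPE_SHARED_INFO;
          ha.u.shared_info.gfn = SHINFO_ADDR / PAGE_SIZE;
  }

  vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &ha);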
---
.../selftests/kvm/x86_64/xen_shinfo_test.c | 44 +++++++++++++++----
1 file changed, 35 insertions(+), 9 deletions(-)

diff --git a/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c b/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c
index b0c3a00ea6a6..e786fa370140 100644
--- a/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c
+++ b/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c
@@ -395,6 +395,7 @@ static int cmp_timespec(struct timespec *a, struct timespec *b)
 	return 0;
 }
 
+static struct shared_info *shinfo;
 static struct vcpu_info *vinfo;
 static struct kvm_vcpu *vcpu;
 
@@ -410,20 +411,38 @@ static void *juggle_shinfo_state(void *arg)
 {
 	struct kvm_vm *vm = (struct kvm_vm *)arg;
 
-	struct kvm_xen_hvm_attr cache_activate = {
+	struct kvm_xen_hvm_attr cache_activate_gfn = {
 		.type = KVM_XEN_ATTR_TYPE_SHARED_INFO,
 		.u.shared_info.gfn = SHINFO_ADDR / PAGE_SIZE
 	};
 
-	struct kvm_xen_hvm_attr cache_deactivate = {
+	struct kvm_xen_hvm_attr cache_deactivate_gfn = {
 		.type = KVM_XEN_ATTR_TYPE_SHARED_INFO,
 		.u.shared_info.gfn = KVM_XEN_INVALID_GFN
 	};
 
+	struct kvm_xen_hvm_attr cache_activate_hva = {
+		.type = KVM_XEN_ATTR_TYPE_SHARED_INFO_HVA,
+		.u.shared_info.hva = (unsigned long)shinfo
+	};
+
+	struct kvm_xen_hvm_attr cache_deactivate_hva = {
+		.type = KVM_XEN_ATTR_TYPE_SHARED_INFO_HVA,
+		.u.shared_info.hva = 0
+	};
+
+	int xen_caps = kvm_check_cap(KVM_CAP_XEN_HVM);
+
 	for (;;) {
-		__vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &cache_activate);
-		__vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &cache_deactivate);
+		__vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &cache_activate_gfn);
 		pthread_testcancel();
+		__vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &cache_deactivate_gfn);
+
+		if (xen_caps & KVM_XEN_HVM_CONFIG_SHARED_INFO_HVA) {
+			__vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &cache_activate_hva);
+			pthread_testcancel();
+			__vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &cache_deactivate_hva);
+		}
 	}
 
 	return NULL;
@@ -449,6 +468,7 @@ int main(int argc, char *argv[])
 	bool do_eventfd_tests = !!(xen_caps & KVM_XEN_HVM_CONFIG_EVTCHN_2LEVEL);
 	bool do_evtchn_tests = do_eventfd_tests && !!(xen_caps & KVM_XEN_HVM_CONFIG_EVTCHN_SEND);
 	bool has_vcpu_id = !!(xen_caps & KVM_XEN_HVM_CONFIG_EVTCHN_SEND);
+	bool has_shinfo_hva = !!(xen_caps & KVM_XEN_HVM_CONFIG_SHARED_INFO_HVA);
 
 	clock_gettime(CLOCK_REALTIME, &min_ts);
 
@@ -459,7 +479,7 @@ int main(int argc, char *argv[])
 				    SHINFO_REGION_GPA, SHINFO_REGION_SLOT, 3, 0);
 	virt_map(vm, SHINFO_REGION_GVA, SHINFO_REGION_GPA, 3);
 
-	struct shared_info *shinfo = addr_gpa2hva(vm, SHINFO_VADDR);
+	shinfo = addr_gpa2hva(vm, SHINFO_VADDR);
 
 	int zero_fd = open("/dev/zero", O_RDONLY);
 	TEST_ASSERT(zero_fd != -1, "Failed to open /dev/zero");
@@ -503,10 +523,16 @@ int main(int argc, char *argv[])
 		vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &vid);
 	}
 
-	struct kvm_xen_hvm_attr ha = {
-		.type = KVM_XEN_ATTR_TYPE_SHARED_INFO,
-		.u.shared_info.gfn = SHINFO_ADDR / PAGE_SIZE,
-	};
+	struct kvm_xen_hvm_attr ha = {};
+
+	if (has_shinfo_hva) {
+		ha.type = KVM_XEN_ATTR_TYPE_SHARED_INFO_HVA;
+		ha.u.shared_info.hva = (unsigned long)shinfo;
+	} else {
+		ha.type = KVM_XEN_ATTR_TYPE_SHARED_INFO;
+		ha.u.shared_info.gfn = SHINFO_ADDR / PAGE_SIZE;
+	}
+
 	vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &ha);
 
 	/*
--
2.39.2