[V3 01/11] KVM: selftests: move vm_phy_pages_alloc() earlier in file

From: Peter Gonda
Date: Wed Aug 10 2022 - 11:20:48 EST


From: Michael Roth <michael.roth@xxxxxxx>

Subsequent patches will break some of this code out into file-local
helper functions, which will also be used by functions such as
vm_vaddr_alloc(). Those callers are currently defined earlier in the
file, so forward declarations would otherwise be needed.

Instead, move vm_phy_pages_alloc() earlier in the file, just above
vm_vaddr_alloc() and friends, which are its main users.

Reviewed-by: Mingwei Zhang <mizhang@xxxxxxxxxx>
Reviewed-by: Andrew Jones <andrew.jones@xxxxxxxxx>
Signed-off-by: Michael Roth <michael.roth@xxxxxxx>
Signed-off-by: Peter Gonda <pgonda@xxxxxxxxxx>
---
tools/testing/selftests/kvm/lib/kvm_util.c | 145 ++++++++++-----------
1 file changed, 72 insertions(+), 73 deletions(-)
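
For reference, a minimal usage sketch of the helpers being moved
(illustrative only, not part of the patch; assumes an already-created
struct kvm_vm *vm and the default memslot 0):

	vm_paddr_t gpa;

	/* One page at or above a page-aligned minimum guest physical address. */
	gpa = vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0);

	/* A contiguous run of four pages from the same memslot. */
	gpa = vm_phy_pages_alloc(vm, 4, KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0);

	/* A page for guest page tables, i.e. memslot 0 at or above 0x180000. */
	gpa = vm_alloc_page_table(vm);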

diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index 9889fe0d8919..cb3a5f8a53b7 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -1089,6 +1089,78 @@ struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id)
return vcpu;
}

+/*
+ * Physical Contiguous Page Allocator
+ *
+ * Input Args:
+ *   vm - Virtual Machine
+ *   num - number of pages
+ *   paddr_min - Physical address minimum
+ *   memslot - Memory region to allocate page from
+ *
+ * Output Args: None
+ *
+ * Return:
+ *   Starting physical address
+ *
+ * Within the VM specified by vm, locates a range of available physical
+ * pages at or above paddr_min. If found, the pages are marked as in use
+ * and their base address is returned. A TEST_ASSERT failure occurs if
+ * not enough pages are available at or above paddr_min.
+ */
+vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
+			      vm_paddr_t paddr_min, uint32_t memslot)
+{
+	struct userspace_mem_region *region;
+	sparsebit_idx_t pg, base;
+
+	TEST_ASSERT(num > 0, "Must allocate at least one page");
+
+	TEST_ASSERT((paddr_min % vm->page_size) == 0,
+		    "Min physical address not divisible by page size.\n paddr_min: 0x%lx page_size: 0x%x",
+		    paddr_min, vm->page_size);
+
+	region = memslot2region(vm, memslot);
+	base = pg = paddr_min >> vm->page_shift;
+
+	do {
+		for (; pg < base + num; ++pg) {
+			if (!sparsebit_is_set(region->unused_phy_pages, pg)) {
+				base = pg = sparsebit_next_set(region->unused_phy_pages, pg);
+				break;
+			}
+		}
+	} while (pg && pg != base + num);
+
+	if (pg == 0) {
+		fprintf(stderr,
+			"No guest physical page available, paddr_min: 0x%lx page_size: 0x%x memslot: %u\n",
+			paddr_min, vm->page_size, memslot);
+		fputs("---- vm dump ----\n", stderr);
+		vm_dump(stderr, vm, 2);
+		abort();
+	}
+
+	for (pg = base; pg < base + num; ++pg)
+		sparsebit_clear(region->unused_phy_pages, pg);
+
+	return base * vm->page_size;
+}
+
+vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
+			     uint32_t memslot)
+{
+	return vm_phy_pages_alloc(vm, 1, paddr_min, memslot);
+}
+
+/* Arbitrary minimum physical address used for virtual translation tables. */
+#define KVM_GUEST_PAGE_TABLE_MIN_PADDR 0x180000
+
+vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm)
+{
+	return vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0);
+}
+
/*
* VM Virtual Address Unused Gap
*
@@ -1735,79 +1807,6 @@ const char *exit_reason_str(unsigned int exit_reason)
return "Unknown";
}

-/*
- * Physical Contiguous Page Allocator
- *
- * Input Args:
- *   vm - Virtual Machine
- *   num - number of pages
- *   paddr_min - Physical address minimum
- *   memslot - Memory region to allocate page from
- *
- * Output Args: None
- *
- * Return:
- *   Starting physical address
- *
- * Within the VM specified by vm, locates a range of available physical
- * pages at or above paddr_min. If found, the pages are marked as in use
- * and their base address is returned. A TEST_ASSERT failure occurs if
- * not enough pages are available at or above paddr_min.
- */
-vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
-			      vm_paddr_t paddr_min, uint32_t memslot)
-{
-	struct userspace_mem_region *region;
-	sparsebit_idx_t pg, base;
-
-	TEST_ASSERT(num > 0, "Must allocate at least one page");
-
-	TEST_ASSERT((paddr_min % vm->page_size) == 0, "Min physical address "
-		"not divisible by page size.\n"
-		" paddr_min: 0x%lx page_size: 0x%x",
-		paddr_min, vm->page_size);
-
-	region = memslot2region(vm, memslot);
-	base = pg = paddr_min >> vm->page_shift;
-
-	do {
-		for (; pg < base + num; ++pg) {
-			if (!sparsebit_is_set(region->unused_phy_pages, pg)) {
-				base = pg = sparsebit_next_set(region->unused_phy_pages, pg);
-				break;
-			}
-		}
-	} while (pg && pg != base + num);
-
-	if (pg == 0) {
-		fprintf(stderr, "No guest physical page available, "
-			"paddr_min: 0x%lx page_size: 0x%x memslot: %u\n",
-			paddr_min, vm->page_size, memslot);
-		fputs("---- vm dump ----\n", stderr);
-		vm_dump(stderr, vm, 2);
-		abort();
-	}
-
-	for (pg = base; pg < base + num; ++pg)
-		sparsebit_clear(region->unused_phy_pages, pg);
-
-	return base * vm->page_size;
-}
-
-vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
-			     uint32_t memslot)
-{
-	return vm_phy_pages_alloc(vm, 1, paddr_min, memslot);
-}
-
-/* Arbitrary minimum physical address used for virtual translation tables. */
-#define KVM_GUEST_PAGE_TABLE_MIN_PADDR 0x180000
-
-vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm)
-{
-	return vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0);
-}
-
/*
* Address Guest Virtual to Host Virtual
*
--
2.37.1.559.g78731f0fdb-goog