[RFC PATCH v4 08/10] KVM: x86: Add gmem hook for invalidating private memory

From: Isaku Yamahata
Date: Thu Jul 20 2023 - 19:33:43 EST


From: Michael Roth <michael.roth@xxxxxxx>

TODO: add a CONFIG option that can be used to completely skip the arch
invalidation loop and avoid the __weak references for arch/platforms
that don't need an additional invalidation hook.
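As a rough sketch of that TODO (the HAVE_KVM_ARCH_GMEM_INVALIDATE
symbol is hypothetical and not defined by this series), the declaration
could be keyed off an arch-selected Kconfig symbol instead of relying
on a __weak stub:

  /* Hypothetical Kconfig symbol, selected by archs that implement the hook. */
  #ifdef CONFIG_HAVE_KVM_ARCH_GMEM_INVALIDATE
  void kvm_arch_gmem_invalidate(struct kvm *kvm, kvm_pfn_t start, kvm_pfn_t end);
  #else
  static inline void kvm_arch_gmem_invalidate(struct kvm *kvm, kvm_pfn_t start,
                                              kvm_pfn_t end) { }
  #endif

kvm_gmem_issue_arch_invalidate() could then be compiled out entirely
when the symbol is not selected.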

In some cases, like with SEV-SNP, guest memory needs to be updated in a
platform-specific manner before it can be safely freed back to the host.
Add hooks to wire up handling of this sort when freeing memory in
response to FALLOC_FL_PUNCH_HOLE operations.

Also issue invalidations of all allocated pages when releasing the gmem
file so that the pages are not left in an unusable state when they get
freed back to the host.
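For illustration, an SEV-SNP implementation of this hook might look
roughly like the following; sev_gmem_invalidate() is only a sketch, and
rmp_make_shared() comes from the in-flight SNP host patches, so none of
this is part of this patch:

  static void sev_gmem_invalidate(struct kvm *kvm, kvm_pfn_t start,
                                  kvm_pfn_t end)
  {
          kvm_pfn_t pfn;

          /*
           * Sketch: transition each private page back to shared/
           * hypervisor-owned in the RMP table so it can be safely
           * freed back to the host.
           */
          for (pfn = start; pfn < end; pfn++)
                  WARN_ON_ONCE(rmp_make_shared(pfn, PG_LEVEL_4K));
  }

with the hook wired up via the .gmem_invalidate field of svm_x86_ops.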

Signed-off-by: Michael Roth <michael.roth@xxxxxxx>
Link: https://lore.kernel.org/r/20230612042559.375660-3-michael.roth@xxxxxxx

---
Changes v2 -> v3:
- Newly added
---
arch/x86/include/asm/kvm-x86-ops.h | 1 +
arch/x86/include/asm/kvm_host.h | 1 +
arch/x86/kvm/x86.c | 6 +++++
include/linux/kvm_host.h | 3 +++
virt/kvm/guest_mem.c | 45 +++++++++++++++++++++++++++++++++
5 files changed, 56 insertions(+)

diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
index a4cb248519cf..d520c6370cd6 100644
--- a/arch/x86/include/asm/kvm-x86-ops.h
+++ b/arch/x86/include/asm/kvm-x86-ops.h
@@ -135,6 +135,7 @@ KVM_X86_OP(complete_emulated_msr)
KVM_X86_OP(vcpu_deliver_sipi_vector)
KVM_X86_OP_OPTIONAL_RET0(vcpu_get_apicv_inhibit_reasons);
KVM_X86_OP_OPTIONAL_RET0(gmem_prepare)
+KVM_X86_OP_OPTIONAL(gmem_invalidate)

#undef KVM_X86_OP
#undef KVM_X86_OP_OPTIONAL
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index de7f0dffa135..440a4a13a93f 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1738,6 +1738,7 @@ struct kvm_x86_ops {

int (*gmem_prepare)(struct kvm *kvm, struct kvm_memory_slot *slot,
kvm_pfn_t pfn, gfn_t gfn, u8 *max_level);
+ void (*gmem_invalidate)(struct kvm *kvm, kvm_pfn_t start, kvm_pfn_t end);
};

struct kvm_x86_nested_ops {
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index fd6c05d1883c..2ae40fa8e178 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -13284,6 +13284,12 @@ bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
}
EXPORT_SYMBOL_GPL(kvm_arch_no_poll);

+#ifdef CONFIG_KVM_PRIVATE_MEM
+void kvm_arch_gmem_invalidate(struct kvm *kvm, kvm_pfn_t start, kvm_pfn_t end)
+{
+ static_call_cond(kvm_x86_gmem_invalidate)(kvm, start, end);
+}
+#endif

int kvm_spec_ctrl_test_value(u64 value)
{
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index ce4d91585368..6c5d39e429e9 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -2360,6 +2360,7 @@ static inline bool kvm_mem_is_private(struct kvm *kvm, gfn_t gfn)
#ifdef CONFIG_KVM_PRIVATE_MEM
int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
gfn_t gfn, kvm_pfn_t *pfn, int *max_order);
+void kvm_arch_gmem_invalidate(struct kvm *kvm, kvm_pfn_t start, kvm_pfn_t end);
#else
static inline int kvm_gmem_get_pfn(struct kvm *kvm,
struct kvm_memory_slot *slot, gfn_t gfn,
@@ -2368,6 +2369,8 @@ static inline int kvm_gmem_get_pfn(struct kvm *kvm,
KVM_BUG_ON(1, kvm);
return -EIO;
}
+
+static inline void kvm_arch_gmem_invalidate(struct kvm *kvm, kvm_pfn_t start, kvm_pfn_t end) { }
#endif /* CONFIG_KVM_PRIVATE_MEM */

#endif
diff --git a/virt/kvm/guest_mem.c b/virt/kvm/guest_mem.c
index ac185c776cda..a14eaac9dbad 100644
--- a/virt/kvm/guest_mem.c
+++ b/virt/kvm/guest_mem.c
@@ -129,6 +129,47 @@ static void kvm_gmem_invalidate_end(struct kvm_gmem *gmem, pgoff_t start,
KVM_MMU_UNLOCK(kvm);
}

+void __weak kvm_arch_gmem_invalidate(struct kvm *kvm, kvm_pfn_t start, kvm_pfn_t end)
+{
+}
+
+/* Handle arch-specific hooks needed before releasing guarded pages. */
+static void kvm_gmem_issue_arch_invalidate(struct kvm *kvm, struct inode *inode,
+ pgoff_t start, pgoff_t end)
+{
+ pgoff_t file_end = i_size_read(inode) >> PAGE_SHIFT;
+ pgoff_t index = start;
+
+ end = min(end, file_end);
+
+ while (index < end) {
+ struct folio *folio;
+ unsigned int order;
+ struct page *page;
+ kvm_pfn_t pfn;
+
+ folio = __filemap_get_folio(inode->i_mapping, index,
+ FGP_LOCK, 0);
+ if (IS_ERR_OR_NULL(folio)) {
+ index++;
+ continue;
+ }
+
+ page = folio_file_page(folio, index);
+ pfn = page_to_pfn(page);
+ order = folio_order(folio);
+
+ kvm_arch_gmem_invalidate(kvm, pfn, pfn + min((1ul << order), end - index));
+
+ index = folio_next_index(folio);
+ folio_unlock(folio);
+ folio_put(folio);
+
+ cond_resched();
+ }
+}
+
static long kvm_gmem_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
struct list_head *gmem_list = &inode->i_mapping->private_list;
@@ -145,6 +186,9 @@ static long kvm_gmem_punch_hole(struct inode *inode, loff_t offset, loff_t len)
list_for_each_entry(gmem, gmem_list, entry)
kvm_gmem_invalidate_begin(gmem, start, end);

+ gmem = list_first_entry_or_null(gmem_list, struct kvm_gmem, entry);
+ if (gmem)
+ kvm_gmem_issue_arch_invalidate(gmem->kvm, inode, start, end);
truncate_inode_pages_range(inode->i_mapping, offset, offset + len - 1);

list_for_each_entry(gmem, gmem_list, entry)
@@ -255,6 +299,7 @@ static int kvm_gmem_release(struct inode *inode, struct file *file)
* memory, as its lifetime is associated with the inode, not the file.
*/
kvm_gmem_invalidate_begin(gmem, 0, -1ul);
+ kvm_gmem_issue_arch_invalidate(gmem->kvm, inode, 0, -1ul);
kvm_gmem_invalidate_end(gmem, 0, -1ul);

mutex_unlock(&kvm->slots_lock);
--
2.25.1