[PATCH RFC v9 09/51] x86/sev: Add RMP entry lookup helpers

From: Michael Roth
Date: Mon Jun 12 2023 - 00:27:10 EST


From: Brijesh Singh <brijesh.singh@xxxxxxx>

The snp_lookup_rmpentry() helper can be used by the host to read the RMP
entry for a given page. The RMP entry format is documented in the AMD PPR;
see https://bugzilla.kernel.org/attachment.cgi?id=296015.
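
For example, a host-side caller could query the state of a page as follows
(a minimal sketch; dump_rmp_state() is a hypothetical helper, not part of
this patch):

  #include <linux/types.h>
  #include <linux/printk.h>
  #include <asm/sev-host.h>

  /* Hypothetical helper: report a pfn's RMP state on the host. */
  static void dump_rmp_state(u64 pfn)
  {
          bool assigned;
          int level, ret;

          ret = snp_lookup_rmpentry(pfn, &assigned, &level);
          if (ret) {
                  pr_warn("RMP lookup for pfn 0x%llx failed: %d\n", pfn, ret);
                  return;
          }

          pr_info("pfn 0x%llx: %s, page level %d\n", pfn,
                  assigned ? "assigned" : "hypervisor-owned", level);
  }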

Co-developed-by: Ashish Kalra <ashish.kalra@xxxxxxx>
Signed-off-by: Ashish Kalra <ashish.kalra@xxxxxxx>
Signed-off-by: Brijesh Singh <brijesh.singh@xxxxxxx>
[mdr: separate 'assigned' indicator from return code]
Signed-off-by: Michael Roth <michael.roth@xxxxxxx>
---
 arch/x86/coco/sev/host.c          | 104 ++++++++++++++++++++++++++++++++++
 arch/x86/include/asm/sev-common.h |   4 ++
 arch/x86/include/asm/sev-host.h   |  22 ++++++++
 arch/x86/include/asm/sev.h        |   3 ---
 4 files changed, 130 insertions(+), 3 deletions(-)
create mode 100644 arch/x86/include/asm/sev-host.h

diff --git a/arch/x86/coco/sev/host.c b/arch/x86/coco/sev/host.c
index 6907ce887b23..0cc5a6d11b25 100644
--- a/arch/x86/coco/sev/host.c
+++ b/arch/x86/coco/sev/host.c
@@ -30,11 +30,41 @@
#include <asm/cmdline.h>
#include <asm/iommu.h>

+/*
+ * The RMP entry format is not architectural. The format is defined in the
+ * PPR for Family 19h, Model 01h, Rev B1 processors.
+ */
+struct rmpentry {
+	union {
+		struct {
+			u64	assigned	: 1,
+				pagesize	: 1,
+				immutable	: 1,
+				rsvd1		: 9,
+				gpa		: 39,
+				asid		: 10,
+				vmsa		: 1,
+				validated	: 1,
+				rsvd2		: 1;
+		} info;
+		u64 low;
+	};
+	u64 high;
+} __packed;
+
/*
* The first 16KB from RMP_BASE is used by the processor for bookkeeping,
* so that range must be added as an offset during the RMP entry lookup.
*/
#define RMPTABLE_CPU_BOOKKEEPING_SZ 0x4000
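+/*
+ * Each 4KB page of DRAM is covered by one 16-byte RMP entry, so the byte
+ * offset of a page's entry past the bookkeeping area is
+ * (paddr >> PAGE_SHIFT) * 16, i.e. paddr >> 8.
+ */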
+#define RMPENTRY_SHIFT			8
+#define rmptable_page_offset(x)	(RMPTABLE_CPU_BOOKKEEPING_SZ +	\
+					 (((unsigned long)x) >> RMPENTRY_SHIFT))

static unsigned long rmptable_start __ro_after_init;
static unsigned long rmptable_end __ro_after_init;
@@ -210,3 +240,77 @@ static int __init snp_rmptable_init(void)
* the page(s) used for DMA are hypervisor owned.
*/
fs_initcall(snp_rmptable_init);
+
+static inline unsigned int rmpentry_assigned(const struct rmpentry *e)
+{
+	return e->info.assigned;
+}
+
+static inline unsigned int rmpentry_pagesize(const struct rmpentry *e)
+{
+	return e->info.pagesize;
+}
+
+static int rmptable_entry(unsigned long paddr, struct rmpentry *entry)
+{
+	unsigned long vaddr;
+
+	vaddr = rmptable_start + rmptable_page_offset(paddr);
+	if (unlikely(vaddr > rmptable_end))
+		return -EFAULT;
+
+	*entry = *(struct rmpentry *)vaddr;
+
+	return 0;
+}
+
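+/*
+ * Read both the 4KB RMP entry for @pfn and the 2MB-aligned entry covering
+ * it; the latter determines the page level the RMP tracks for the range.
+ */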
+static int __snp_lookup_rmpentry(u64 pfn, struct rmpentry *entry, int *level)
+{
+	unsigned long paddr = pfn << PAGE_SHIFT;
+	struct rmpentry large_entry;
+	int ret;
+
+	if (!cpu_feature_enabled(X86_FEATURE_SEV_SNP))
+		return -ENXIO;
+
+	ret = rmptable_entry(paddr, entry);
+	if (ret)
+		return ret;
+
+	/* Read the large (2MB) RMP entry to get the page level tracked by the RMP. */
+	ret = rmptable_entry(paddr & PMD_MASK, &large_entry);
+	if (ret)
+		return ret;
+
+	*level = RMP_TO_X86_PG_LEVEL(rmpentry_pagesize(&large_entry));
+
+	return 0;
+}
+
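+/**
+ * snp_lookup_rmpentry() - Look up the RMP entry for a system-physical page
+ * @pfn:      page frame number to look up
+ * @assigned: set to true if the page is assigned in the RMP, false otherwise
+ * @level:    set to the x86 page level (PG_LEVEL_4K or PG_LEVEL_2M) the RMP
+ *            tracks for the page
+ *
+ * Return: 0 on success, -ENXIO if SEV-SNP is not enabled, or -EFAULT if the
+ * entry lies outside the RMP table.
+ */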
+int snp_lookup_rmpentry(u64 pfn, bool *assigned, int *level)
+{
+	struct rmpentry e;
+	int ret;
+
+	ret = __snp_lookup_rmpentry(pfn, &e, level);
+	if (ret)
+		return ret;
+
+	*assigned = !!rmpentry_assigned(&e);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(snp_lookup_rmpentry);
diff --git a/arch/x86/include/asm/sev-common.h b/arch/x86/include/asm/sev-common.h
index b8357d6ecd47..bf0378136289 100644
--- a/arch/x86/include/asm/sev-common.h
+++ b/arch/x86/include/asm/sev-common.h
@@ -171,4 +171,8 @@ struct snp_psc_desc {
#define GHCB_ERR_INVALID_INPUT 5
#define GHCB_ERR_INVALID_EVENT 6

+/* RMP page size */
+#define RMP_PG_SIZE_4K 0
+#define RMP_TO_X86_PG_LEVEL(level) (((level) == RMP_PG_SIZE_4K) ? PG_LEVEL_4K : PG_LEVEL_2M)
+
#endif
diff --git a/arch/x86/include/asm/sev-host.h b/arch/x86/include/asm/sev-host.h
new file mode 100644
index 000000000000..30d47e20081d
--- /dev/null
+++ b/arch/x86/include/asm/sev-host.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * AMD SVM-SEV Host Support.
+ *
+ * Copyright (C) 2023 Advanced Micro Devices, Inc.
+ *
+ * Author: Ashish Kalra <ashish.kalra@xxxxxxx>
+ *
+ */
+
+#ifndef __ASM_X86_SEV_HOST_H
+#define __ASM_X86_SEV_HOST_H
+
+#include <asm/sev-common.h>
+
+#ifdef CONFIG_KVM_AMD_SEV
+int snp_lookup_rmpentry(u64 pfn, bool *assigned, int *level);
+#else
+static inline int snp_lookup_rmpentry(u64 pfn, bool *assigned, int *level) { return -ENODEV; }
+#endif
+
+#endif
diff --git a/arch/x86/include/asm/sev.h b/arch/x86/include/asm/sev.h
index d34c46db7dd1..446fc7a9f7b0 100644
--- a/arch/x86/include/asm/sev.h
+++ b/arch/x86/include/asm/sev.h
@@ -81,9 +81,6 @@ extern bool handle_vc_boot_ghcb(struct pt_regs *regs);
/* Software defined (when rFlags.CF = 1) */
#define PVALIDATE_FAIL_NOUPDATE 255

-/* RMP page size */
-#define RMP_PG_SIZE_4K 0
-
#define RMPADJUST_VMSA_PAGE_BIT BIT(16)

/* SNP Guest message request */
--
2.25.1