[PATCH v4 09/14] x86/sev: Add Secure TSC support for SNP guests

From: Nikunj A Dadhania
Date: Mon Aug 14 2023 - 01:55:23 EST


Add support for Secure TSC in SNP-enabled guests. Secure TSC allows
guests to securely use the RDTSC/RDTSCP instructions, as the parameters
being used cannot be changed by the hypervisor once the guest is launched.

During boot-up of the secondary CPUs, Secure TSC enabled guests need to
query TSC info from the AMD Security Processor. This communication
channel is encrypted between the AMD Security Processor and the guest;
the hypervisor is just the conduit that delivers the guest messages to
the AMD Security Processor. Each message is protected with an
AEAD (AES-256 GCM). Use the minimal AES-GCM library to encrypt/decrypt
the SNP guest messages when communicating with the PSP.
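
For reference, the TSC_INFO exchange added below boils down to the
following (a minimal sketch built only from the structures and helpers
this series introduces, not a drop-in replacement for
snp_get_tsc_info(); it assumes the PSP messaging channel has already
been set up via snp_assign_vmpck()/snp_setup_psp_messaging(), and it
omits the error handling and buffer scrubbing done in the real code):

    struct snp_tsc_info_req tsc_req = {};      /* must be zero filled */
    struct snp_tsc_info_resp tsc_resp;
    u8 buf[SNP_TSC_INFO_REQ_SZ + AUTHTAG_LEN]; /* response + authtag */
    struct snp_guest_request_ioctl rio = {};
    struct snp_guest_req req = {
            .msg_version = MSG_HDR_VER,
            .msg_type    = SNP_MSG_TSC_INFO_REQ,
            .vmpck_id    = tsc_snp_dev.vmpck_id,
            .req_buf     = &tsc_req,
            .req_sz      = sizeof(tsc_req),
            .resp_buf    = buf,
            .resp_sz     = sizeof(tsc_resp) + AUTHTAG_LEN,
            .exit_code   = SVM_VMGEXIT_GUEST_REQUEST,
    };

    if (!snp_send_guest_request(&tsc_snp_dev, &req, &rio)) {
            memcpy(&tsc_resp, buf, sizeof(tsc_resp));
            /* tsc_resp.tsc_scale / tsc_resp.tsc_offset are now valid */
    }

The returned scale and offset are cached and later programmed into the
tsc_scale/tsc_offset fields of each secondary CPU's VMSA.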

Signed-off-by: Nikunj A Dadhania <nikunj@xxxxxxx>
---
 arch/x86/coco/core.c             |  3 ++
 arch/x86/include/asm/sev-guest.h | 18 +++++++
 arch/x86/include/asm/sev.h       |  2 +
 arch/x86/include/asm/svm.h       |  6 ++-
 arch/x86/kernel/sev.c            | 82 ++++++++++++++++++++++++++++++++
 arch/x86/mm/mem_encrypt_amd.c    |  6 +++
 include/linux/cc_platform.h      |  8 ++++
 7 files changed, 123 insertions(+), 2 deletions(-)

diff --git a/arch/x86/coco/core.c b/arch/x86/coco/core.c
index 73f83233d25d..1cfb86c6bd78 100644
--- a/arch/x86/coco/core.c
+++ b/arch/x86/coco/core.c
@@ -89,6 +89,9 @@ static bool amd_cc_platform_has(enum cc_attr attr)
case CC_ATTR_GUEST_SEV_SNP:
return sev_status & MSR_AMD64_SEV_SNP_ENABLED;

+ case CC_ATTR_GUEST_SECURE_TSC:
+ return sev_status & MSR_AMD64_SNP_SECURE_TSC;
+
default:
return false;
}
diff --git a/arch/x86/include/asm/sev-guest.h b/arch/x86/include/asm/sev-guest.h
index e6f94208173d..58739173eba9 100644
--- a/arch/x86/include/asm/sev-guest.h
+++ b/arch/x86/include/asm/sev-guest.h
@@ -39,6 +39,8 @@ enum msg_type {
SNP_MSG_ABSORB_RSP,
SNP_MSG_VMRK_REQ,
SNP_MSG_VMRK_RSP,
+ SNP_MSG_TSC_INFO_REQ = 17,
+ SNP_MSG_TSC_INFO_RSP,

SNP_MSG_TYPE_MAX
};
@@ -111,6 +113,22 @@ struct snp_guest_req {
u8 msg_type;
};

+struct snp_tsc_info_req {
+#define SNP_TSC_INFO_REQ_SZ 128
+ /* Must be zero filled */
+ u8 rsvd[SNP_TSC_INFO_REQ_SZ];
+} __packed;
+
+struct snp_tsc_info_resp {
+ /* Status of TSC_INFO message */
+ u32 status;
+ u32 rsvd1;
+ u64 tsc_scale;
+ u64 tsc_offset;
+ u32 tsc_factor;
+ u8 rsvd2[100];
+} __packed;
+
int snp_setup_psp_messaging(struct snp_guest_dev *snp_dev);
int snp_send_guest_request(struct snp_guest_dev *dev, struct snp_guest_req *req,
struct snp_guest_request_ioctl *rio);
diff --git a/arch/x86/include/asm/sev.h b/arch/x86/include/asm/sev.h
index 807f85f8014c..d5b35da1b583 100644
--- a/arch/x86/include/asm/sev.h
+++ b/arch/x86/include/asm/sev.h
@@ -189,6 +189,7 @@ void snp_set_memory_private(unsigned long vaddr, unsigned int npages);
void snp_set_wakeup_secondary_cpu(void);
bool snp_init(struct boot_params *bp);
void __init __noreturn snp_abort(void);
+void __init snp_secure_tsc_prepare(void);
#else
static inline void sev_es_ist_enter(struct pt_regs *regs) { }
static inline void sev_es_ist_exit(void) { }
@@ -208,6 +209,7 @@ static inline void snp_set_memory_private(unsigned long vaddr, unsigned int npag
static inline void snp_set_wakeup_secondary_cpu(void) { }
static inline bool snp_init(struct boot_params *bp) { return false; }
static inline void snp_abort(void) { }
+static inline void __init snp_secure_tsc_prepare(void) { }
#endif

#endif
diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index e7c7379d6ac7..3956c5095109 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -412,7 +412,9 @@ struct sev_es_save_area {
u8 reserved_0x298[80];
u32 pkru;
u32 tsc_aux;
- u8 reserved_0x2f0[24];
+ u64 tsc_scale;
+ u64 tsc_offset;
+ u8 reserved_0x300[8];
u64 rcx;
u64 rdx;
u64 rbx;
@@ -544,7 +546,7 @@ static inline void __unused_size_checks(void)
BUILD_BUG_RESERVED_OFFSET(sev_es_save_area, 0x1c0);
BUILD_BUG_RESERVED_OFFSET(sev_es_save_area, 0x248);
BUILD_BUG_RESERVED_OFFSET(sev_es_save_area, 0x298);
- BUILD_BUG_RESERVED_OFFSET(sev_es_save_area, 0x2f0);
+ BUILD_BUG_RESERVED_OFFSET(sev_es_save_area, 0x300);
BUILD_BUG_RESERVED_OFFSET(sev_es_save_area, 0x320);
BUILD_BUG_RESERVED_OFFSET(sev_es_save_area, 0x380);
BUILD_BUG_RESERVED_OFFSET(sev_es_save_area, 0x3f0);
diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c
index 72e76c58aebd..d55562cd395d 100644
--- a/arch/x86/kernel/sev.c
+++ b/arch/x86/kernel/sev.c
@@ -76,6 +76,10 @@ static u64 sev_hv_features __ro_after_init;
/* Secrets page physical address from the CC blob */
static u64 secrets_pa __ro_after_init;

+/* Secure TSC values read using TSC_INFO SNP Guest request */
+static u64 guest_tsc_scale __ro_after_init;
+static u64 guest_tsc_offset __ro_after_init;
+
/* #VC handler runtime per-CPU data */
struct sev_es_runtime_data {
struct ghcb ghcb_page;
@@ -1411,6 +1415,78 @@ bool snp_assign_vmpck(struct snp_guest_dev *dev, unsigned int vmpck_id)
}
EXPORT_SYMBOL_GPL(snp_assign_vmpck);

+static struct snp_guest_dev tsc_snp_dev __initdata;
+
+static int __init snp_get_tsc_info(void)
+{
+ static u8 buf[SNP_TSC_INFO_REQ_SZ + AUTHTAG_LEN];
+ struct snp_guest_request_ioctl rio;
+ struct snp_tsc_info_resp tsc_resp;
+ struct snp_tsc_info_req tsc_req;
+ struct snp_guest_req req;
+ int rc, resp_len;
+
+ /*
+ * The intermediate response buffer is used while decrypting the
+ * response payload. Make sure that it has enough space to cover the
+ * authtag.
+ */
+ resp_len = sizeof(tsc_resp) + AUTHTAG_LEN;
+ if (sizeof(buf) < resp_len)
+ return -EINVAL;
+
+ memset(&tsc_req, 0, sizeof(tsc_req));
+ memset(&req, 0, sizeof(req));
+ memset(&rio, 0, sizeof(rio));
+ memset(buf, 0, sizeof(buf));
+
+ if (!snp_assign_vmpck(&tsc_snp_dev, 0))
+ return -EINVAL;
+
+ /* Initialize the PSP channel to send snp messages */
+ if (snp_setup_psp_messaging(&tsc_snp_dev))
+ sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED);
+
+ req.msg_version = MSG_HDR_VER;
+ req.msg_type = SNP_MSG_TSC_INFO_REQ;
+ req.vmpck_id = tsc_snp_dev.vmpck_id;
+ req.req_buf = &tsc_req;
+ req.req_sz = sizeof(tsc_req);
+ req.resp_buf = buf;
+ req.resp_sz = resp_len;
+ req.exit_code = SVM_VMGEXIT_GUEST_REQUEST;
+ rc = snp_send_guest_request(&tsc_snp_dev, &req, &rio);
+ if (rc)
+ goto err_req;
+
+ memcpy(&tsc_resp, buf, sizeof(tsc_resp));
+ pr_debug("%s: Valid response status %x scale %llx offset %llx factor %x\n",
+ __func__, tsc_resp.status, tsc_resp.tsc_scale, tsc_resp.tsc_offset,
+ tsc_resp.tsc_factor);
+
+ guest_tsc_scale = tsc_resp.tsc_scale;
+ guest_tsc_offset = tsc_resp.tsc_offset;
+
+err_req:
+ /* The response buffer contains the sensitive data, explicitly clear it. */
+ memzero_explicit(buf, sizeof(buf));
+ memzero_explicit(&tsc_resp, sizeof(tsc_resp));
+ memzero_explicit(&req, sizeof(req));
+
+ return rc;
+}
+
+void __init snp_secure_tsc_prepare(void)
+{
+ if (!cc_platform_has(CC_ATTR_GUEST_SECURE_TSC))
+ return;
+
+ if (snp_get_tsc_info())
+ sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED);
+
+ pr_debug("SecureTSC enabled\n");
+}
+
static int wakeup_cpu_via_vmgexit(int apic_id, unsigned long start_ip)
{
struct sev_es_save_area *cur_vmsa, *vmsa;
@@ -1511,6 +1587,12 @@ static int wakeup_cpu_via_vmgexit(int apic_id, unsigned long start_ip)
vmsa->vmpl = 0;
vmsa->sev_features = sev_status >> 2;

+ /* Setting Secure TSC parameters */
+ if (cc_platform_has(CC_ATTR_GUEST_SECURE_TSC)) {
+ vmsa->tsc_scale = guest_tsc_scale;
+ vmsa->tsc_offset = guest_tsc_offset;
+ }
+
/* Switch the page over to a VMSA page now that it is initialized */
ret = snp_set_vmsa(vmsa, true);
if (ret) {
diff --git a/arch/x86/mm/mem_encrypt_amd.c b/arch/x86/mm/mem_encrypt_amd.c
index e0b51c09109f..fc25749fb2e5 100644
--- a/arch/x86/mm/mem_encrypt_amd.c
+++ b/arch/x86/mm/mem_encrypt_amd.c
@@ -215,6 +215,11 @@ void __init sme_map_bootdata(char *real_mode_data)
__sme_early_map_unmap_mem(__va(cmdline_paddr), COMMAND_LINE_SIZE, true);
}

+void __init amd_enc_init(void)
+{
+ snp_secure_tsc_prepare();
+}
+
void __init sev_setup_arch(void)
{
phys_addr_t total_mem = memblock_phys_mem_size();
@@ -501,6 +506,7 @@ void __init sme_early_init(void)
x86_platform.guest.enc_status_change_finish = amd_enc_status_change_finish;
x86_platform.guest.enc_tlb_flush_required = amd_enc_tlb_flush_required;
x86_platform.guest.enc_cache_flush_required = amd_enc_cache_flush_required;
+ x86_platform.guest.enc_init = amd_enc_init;
}

void __init mem_encrypt_free_decrypted_mem(void)
diff --git a/include/linux/cc_platform.h b/include/linux/cc_platform.h
index cb0d6cd1c12f..e081ca4d5da2 100644
--- a/include/linux/cc_platform.h
+++ b/include/linux/cc_platform.h
@@ -90,6 +90,14 @@ enum cc_attr {
* Examples include TDX Guest.
*/
CC_ATTR_HOTPLUG_DISABLED,
+
+ /**
+ * @CC_ATTR_GUEST_SECURE_TSC: Secure TSC is active.
+ *
+ * The platform/OS is running as a guest/virtual machine and actively
+ * using AMD SEV-SNP Secure TSC feature.
+ */
+ CC_ATTR_GUEST_SECURE_TSC,
};

#ifdef CONFIG_ARCH_HAS_CC_PLATFORM
--
2.34.1