[RFCv2 04/16] x86/kvm: Use bounce buffers for KVM memory protection

From: Kirill A. Shutemov
Date: Tue Oct 20 2020 - 02:21:38 EST


Mirror SEV: force use of SWIOTLB bounce buffers whenever KVM memory
protection is enabled. Devices cannot DMA to or from protected guest
memory, so all streaming DMA has to be bounced through buffers in
shared (unprotected) memory.

Move mem_encrypt_init() from mem_encrypt.c to mem_encrypt_common.c so
that it is built whenever X86_MEM_ENCRYPT_COMMON is selected, and make
it update the SWIOTLB buffer attributes and print the relevant banner
for KVM memory protection as well as for SME/SEV.
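
For illustration only (generic driver code, not part of this patch):
with swiotlb_force == SWIOTLB_FORCE every streaming DMA mapping is
bounced, so a driver never hands the device an address inside
protected memory. A minimal sketch, with a hypothetical helper:

#include <linux/dma-mapping.h>

/* Hypothetical driver helper, shown only to illustrate the effect. */
static int example_send(struct device *dev, void *buf, size_t len)
{
	dma_addr_t dma;

	/*
	 * Under SWIOTLB_FORCE this copies @buf into the (shared,
	 * unprotected) SWIOTLB pool and returns the bounce buffer's
	 * DMA address rather than mapping @buf directly.
	 */
	dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma))
		return -ENOMEM;

	/* ... program the device with @dma and start the transfer ... */

	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
	return 0;
}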

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@xxxxxxxxxxxxxxx>
---
 arch/x86/Kconfig                 |  1 +
 arch/x86/kernel/kvm.c            |  2 ++
 arch/x86/kernel/pci-swiotlb.c    |  3 ++-
 arch/x86/mm/mem_encrypt.c        | 21 ---------------------
 arch/x86/mm/mem_encrypt_common.c | 23 +++++++++++++++++++++++
 5 files changed, 28 insertions(+), 22 deletions(-)

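Note for reviewers (after the '---', so not part of the commit
message): mem_encrypt_init() overrides a __weak stub in init/main.c
and runs from start_kernel() before any device can issue DMA, which is
what makes updating the SWIOTLB buffer attributes here safe. Roughly,
the existing mainline hook looks like this (shown for context only):

void __init __weak mem_encrypt_init(void) { }

asmlinkage __visible void __init start_kernel(void)
{
	...
	/*
	 * This needs to be called before any devices perform DMA
	 * operations that might use the SWIOTLB bounce buffers.
	 */
	mem_encrypt_init();
	...
}
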
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index cd272e3babbc..b22b95517437 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -806,6 +806,7 @@ config KVM_GUEST
 	select ARCH_CPUIDLE_HALTPOLL
 	select X86_HV_CALLBACK_VECTOR
 	select X86_MEM_ENCRYPT_COMMON
+	select SWIOTLB
 	default y
 	help
 	  This option enables various optimizations for running under the KVM
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 2c1f8952b92a..30bb3d2d6ccd 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -26,6 +26,7 @@
 #include <linux/kprobes.h>
 #include <linux/nmi.h>
 #include <linux/swait.h>
+#include <linux/swiotlb.h>
 #include <asm/timer.h>
 #include <asm/cpu.h>
 #include <asm/traps.h>
@@ -759,6 +760,7 @@ static void __init kvm_init_platform(void)
pr_info("KVM memory protection enabled\n");
mem_protected = true;
setup_force_cpu_cap(X86_FEATURE_KVM_MEM_PROTECTED);
+ swiotlb_force = SWIOTLB_FORCE;
}
}

diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
index c2cfa5e7c152..814060a6ceb0 100644
--- a/arch/x86/kernel/pci-swiotlb.c
+++ b/arch/x86/kernel/pci-swiotlb.c
@@ -13,6 +13,7 @@
 #include <asm/dma.h>
 #include <asm/xen/swiotlb-xen.h>
 #include <asm/iommu_table.h>
+#include <asm/kvm_para.h>

 int swiotlb __read_mostly;

@@ -49,7 +50,7 @@ int __init pci_swiotlb_detect_4gb(void)
 	 * buffers are allocated and used for devices that do not support
 	 * the addressing range required for the encryption mask.
 	 */
-	if (sme_active())
+	if (sme_active() || kvm_mem_protected())
 		swiotlb = 1;

 	return swiotlb;
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index 4dbdc9dac36b..5de64e068b0a 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -369,24 +369,3 @@ void __init mem_encrypt_free_decrypted_mem(void)

 	free_init_pages("unused decrypted", vaddr, vaddr_end);
 }
-
-/* Architecture __weak replacement functions */
-void __init mem_encrypt_init(void)
-{
-	if (!sme_me_mask)
-		return;
-
-	/* Call into SWIOTLB to update the SWIOTLB DMA buffers */
-	swiotlb_update_mem_attributes();
-
-	/*
-	 * With SEV, we need to unroll the rep string I/O instructions.
-	 */
-	if (sev_active())
-		static_branch_enable(&sev_enable_key);
-
-	pr_info("AMD %s active\n",
-		sev_active() ? "Secure Encrypted Virtualization (SEV)"
-			     : "Secure Memory Encryption (SME)");
-}
-
diff --git a/arch/x86/mm/mem_encrypt_common.c b/arch/x86/mm/mem_encrypt_common.c
index a878e7f246d5..7900f3788010 100644
--- a/arch/x86/mm/mem_encrypt_common.c
+++ b/arch/x86/mm/mem_encrypt_common.c
@@ -37,3 +37,26 @@ bool force_dma_unencrypted(struct device *dev)

 	return false;
 }
+
+void __init mem_encrypt_init(void)
+{
+	if (!sme_me_mask && !kvm_mem_protected())
+		return;
+
+	/* Call into SWIOTLB to update the SWIOTLB DMA buffers */
+	swiotlb_update_mem_attributes();
+
+	/*
+	 * With SEV, we need to unroll the rep string I/O instructions.
+	 */
+	if (sev_active())
+		static_branch_enable(&sev_enable_key);
+
+	if (sme_me_mask) {
+		pr_info("AMD %s active\n",
+			sev_active() ? "Secure Encrypted Virtualization (SEV)"
+				     : "Secure Memory Encryption (SME)");
+	} else {
+		pr_info("KVM memory protection enabled\n");
+	}
+}
--
2.26.2