[RFC PATCH 11/12] HV/Netvsc: Add Isolation VM support for netvsc driver

From: Tianyu Lan
Date: Sun Feb 28 2021 - 10:09:21 EST


From: Tianyu Lan <Tianyu.Lan@xxxxxxxxxxxxx>

Add Isolation VM support for the netvsc driver. Map the send and
receive ring buffers into the extra address space in an SNP
isolation VM, reserve bounce buffer space for packets sent via
vmbus_sendpacket_pagebuffer(), and release the bounce buffer via
hv_pkt_bounce() when the send-complete response arrives from the
host.
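
The receive and send buffer setup below performs the same per-page
remap into the extra address space twice. As a reading aid only, the
common step could be factored into a helper along these lines;
netvsc_remap_buf() is a hypothetical name used for illustration, and
error unwinding (freeing the vm area on failure) is elided:

static void *netvsc_remap_buf(void *buf, unsigned long buf_size)
{
	struct vm_struct *area;
	unsigned long vaddr;
	u64 extra_phys;
	int i, ret;

	/* Allocate a virtual address range to hold the remapped pages. */
	area = get_vm_area(buf_size, VM_IOREMAP);
	if (!area)
		return NULL;

	vaddr = (unsigned long)area->addr;
	for (i = 0; i < buf_size / HV_HYP_PAGE_SIZE; i++) {
		/* Alias of the page above the shared GPA boundary,
		 * through which an SNP isolation VM shares memory
		 * with the host.
		 */
		extra_phys = (virt_to_hvpfn(buf + i * HV_HYP_PAGE_SIZE)
			<< HV_HYP_PAGE_SHIFT) + ms_hyperv.shared_gpa_boundary;
		ret = ioremap_page_range(vaddr + i * HV_HYP_PAGE_SIZE,
					 vaddr + (i + 1) * HV_HYP_PAGE_SIZE,
					 extra_phys, PAGE_KERNEL_IO);
		if (ret)
			return NULL;
	}

	return (void *)vaddr;
}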

Co-developed-by: Sunil Muthuswamy <sunilmut@xxxxxxxxxxxxx>
Signed-off-by: Sunil Muthuswamy <sunilmut@xxxxxxxxxxxxx>
Signed-off-by: Tianyu Lan <Tianyu.Lan@xxxxxxxxxxxxx>
---
drivers/net/hyperv/hyperv_net.h | 3 +
drivers/net/hyperv/netvsc.c | 97 ++++++++++++++++++++++++++++++---
2 files changed, 92 insertions(+), 8 deletions(-)

diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 11266b92bcf0..45d5838ff128 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -1027,14 +1027,17 @@ struct netvsc_device {

/* Receive buffer allocated by us but managed by NetVSP */
void *recv_buf;
+ void *recv_original_buf;
u32 recv_buf_size; /* allocated bytes */
u32 recv_buf_gpadl_handle;
u32 recv_section_cnt;
u32 recv_section_size;
u32 recv_completion_cnt;

/* Send buffer allocated by us */
void *send_buf;
+ void *send_original_buf;
u32 send_buf_size;
u32 send_buf_gpadl_handle;
u32 send_section_cnt;
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 77657c5acc65..171af85e055d 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -26,7 +26,7 @@

#include "hyperv_net.h"
#include "netvsc_trace.h"
+#include "../../hv/hyperv_vmbus.h"

/*
* Switch the data path from the synthetic interface to the VF
* interface.
@@ -119,8 +119,21 @@ static void free_netvsc_device(struct rcu_head *head)
int i;

kfree(nvdev->extension);
- vfree(nvdev->recv_buf);
- vfree(nvdev->send_buf);
+
+ if (nvdev->recv_original_buf) {
+ iounmap(nvdev->recv_buf);
+ vfree(nvdev->recv_original_buf);
+ } else {
+ vfree(nvdev->recv_buf);
+ }
+
+ if (nvdev->send_original_buf) {
+ iounmap(nvdev->send_buf);
+ vfree(nvdev->send_original_buf);
+ } else {
+ vfree(nvdev->send_buf);
+ }
+
kfree(nvdev->send_section_map);

for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
@@ -241,13 +254,18 @@ static void netvsc_teardown_recv_gpadl(struct hv_device *device,
struct netvsc_device *net_device,
struct net_device *ndev)
{
+ void *recv_buf;
int ret;

if (net_device->recv_buf_gpadl_handle) {
+ if (net_device->recv_original_buf)
+ recv_buf = net_device->recv_original_buf;
+ else
+ recv_buf = net_device->recv_buf;
+
ret = vmbus_teardown_gpadl(device->channel,
net_device->recv_buf_gpadl_handle,
- net_device->recv_buf,
- net_device->recv_buf_size);
+ recv_buf, net_device->recv_buf_size);

/* If we failed here, we might as well return and have a leak
* rather than continue and a bugchk
@@ -265,13 +283,18 @@ static void netvsc_teardown_send_gpadl(struct hv_device *device,
struct netvsc_device *net_device,
struct net_device *ndev)
{
+ void *send_buf;
int ret;

if (net_device->send_buf_gpadl_handle) {
+ if (net_device->send_original_buf)
+ send_buf = net_device->send_original_buf;
+ else
+ send_buf = net_device->send_buf;
+
ret = vmbus_teardown_gpadl(device->channel,
net_device->send_buf_gpadl_handle,
- net_device->send_buf,
- net_device->send_buf_size);
+ send_buf, net_device->send_buf_size);

/* If we failed here, we might as well return and have a leak
* rather than continue and a bugchk
@@ -306,9 +329,19 @@ static int netvsc_init_buf(struct hv_device *device,
struct nvsp_1_message_send_receive_buffer_complete *resp;
struct net_device *ndev = hv_get_drvdata(device);
struct nvsp_message *init_packet;
+ struct vm_struct *area;
+ u64 extra_phys;
unsigned int buf_size;
+ unsigned long vaddr;
size_t map_words;
- int ret = 0;
+ int ret = 0, i;
+
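+ /*
+ * Reserve bounce buffer space for copying packets sent through
+ * vmbus_sendpacket_pagebuffer(); it is released again in the
+ * send-complete path via hv_pkt_bounce().
+ */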
+ ret = hv_bounce_resources_reserve(device->channel,
+ PAGE_SIZE * 1024);
+ if (ret) {
+ netdev_err(ndev, "Failed to reserve bounce buffer.\n");
+ return ret;
+ }

/* Get receive buffer area. */
buf_size = device_info->recv_sections * device_info->recv_section_size;
@@ -345,6 +378,28 @@ static int netvsc_init_buf(struct hv_device *device,
goto cleanup;
}

+ if (hv_isolation_type_snp()) {
+ area = get_vm_area(buf_size, VM_IOREMAP);
+ if (!area) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ vaddr = (unsigned long)area->addr;
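+ /*
+ * Remap each receive buffer page at its extra GPA above
+ * ms_hyperv.shared_gpa_boundary so the buffer is accessed
+ * through memory shared with the host.
+ */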
+ for (i = 0; i < buf_size / HV_HYP_PAGE_SIZE; i++) {
+ extra_phys = (virt_to_hvpfn(net_device->recv_buf + i * HV_HYP_PAGE_SIZE)
+ << HV_HYP_PAGE_SHIFT) + ms_hyperv.shared_gpa_boundary;
+ ret = ioremap_page_range(vaddr + i * HV_HYP_PAGE_SIZE,
+ vaddr + (i + 1) * HV_HYP_PAGE_SIZE,
+ extra_phys, PAGE_KERNEL_IO);
+ if (ret)
+ goto cleanup;
+ }
+
+ net_device->recv_original_buf = net_device->recv_buf;
+ net_device->recv_buf = (void *)vaddr;
+ }
+
/* Notify the NetVsp of the gpadl handle */
init_packet = &net_device->channel_init_pkt;
memset(init_packet, 0, sizeof(struct nvsp_message));
@@ -435,12 +490,36 @@ static int netvsc_init_buf(struct hv_device *device,
buf_size,
&net_device->send_buf_gpadl_handle,
VMBUS_PAGE_VISIBLE_READ_WRITE);
if (ret != 0) {
netdev_err(ndev,
"unable to establish send buffer's gpadl\n");
goto cleanup;
}

+ if (hv_isolation_type_snp()) {
+ area = get_vm_area(buf_size, VM_IOREMAP);
+ if (!area) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ vaddr = (unsigned long)area->addr;
+
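+ /*
+ * As for the receive buffer, remap the send buffer pages at
+ * their extra GPA above the shared GPA boundary.
+ */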
+ for (i = 0; i < buf_size / HV_HYP_PAGE_SIZE; i++) {
+ extra_phys = (virt_to_hvpfn(net_device->send_buf + i * HV_HYP_PAGE_SIZE)
+ << HV_HYP_PAGE_SHIFT) + ms_hyperv.shared_gpa_boundary;
+ ret = ioremap_page_range(vaddr + i * HV_HYP_PAGE_SIZE,
+ vaddr + (i + 1) * HV_HYP_PAGE_SIZE,
+ extra_phys, PAGE_KERNEL_IO);
+ if (ret)
+ goto cleanup;
+ }
+
+ net_device->send_original_buf = net_device->send_buf;
+ net_device->send_buf = (void *)vaddr;
+ }
+
/* Notify the NetVsp of the gpadl handle */
init_packet = &net_device->channel_init_pkt;
memset(init_packet, 0, sizeof(struct nvsp_message));
@@ -747,6 +826,8 @@ static void netvsc_send_tx_complete(struct net_device *ndev,
tx_stats->bytes += packet->total_bytes;
u64_stats_update_end(&tx_stats->syncp);

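+ /* Release the packet's bounce buffer once the host completes the send. */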
+ if (desc->type == VM_PKT_COMP && packet->bounce_pkt)
+ hv_pkt_bounce(channel, packet->bounce_pkt);
+
napi_consume_skb(skb, budget);
}

--
2.25.1