[RFC PATCH] Reduce vdpa initialization / startup overhead for ioctl()

From: peili.dev
Date: Thu Apr 20 2023 - 01:22:02 EST


From: Pei Li <peili@xxxxxxxxxxxxxx>

Introduce the VHOST_BACKEND_F_IOCTL_BATCH backend feature together with
two batched ioctls, VHOST_VDPA_SET_VRING_ENABLE_BATCH and
VHOST_VDPA_GET_VRING_GROUP_BATCH, so that userspace can enable vrings
and query vring groups for many virtqueues in a single system call
instead of issuing one ioctl per virtqueue. This reduces vdpa device
initialization / startup overhead.

Signed-off-by: Pei Li <peili@xxxxxxxxxxxxxx>
---
drivers/vhost/vdpa.c | 77 +++++++++++++++++++++++++++++++-
include/uapi/linux/vhost.h | 7 +++
include/uapi/linux/vhost_types.h | 1 +
tools/include/uapi/linux/vhost.h | 6 +++
4 files changed, 89 insertions(+), 2 deletions(-)

diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
index 7be9d9d8f01c..5419db1dfb7a 100644
--- a/drivers/vhost/vdpa.c
+++ b/drivers/vhost/vdpa.c
@@ -29,7 +29,8 @@ enum {
VHOST_VDPA_BACKEND_FEATURES =
(1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2) |
(1ULL << VHOST_BACKEND_F_IOTLB_BATCH) |
- (1ULL << VHOST_BACKEND_F_IOTLB_ASID),
+ (1ULL << VHOST_BACKEND_F_IOTLB_ASID) |
+ (1ULL << VHOST_BACKEND_F_IOCTL_BATCH),
};

#define VHOST_VDPA_DEV_MAX (1U << MINORBITS)
@@ -521,6 +522,85 @@ static long vhost_vdpa_resume(struct vhost_vdpa *v)
 	return ops->resume(vdpa);
 }
 
+/*
+ * Handle VHOST_VDPA_{SET_VRING_ENABLE,GET_VRING_GROUP}_BATCH.
+ *
+ * argp points to an array of vhost_vring_state: element 0's .num holds
+ * the number of entries that follow; entries 1..num carry the per-vring
+ * data (.index selects the vring; for the GET variant the group is
+ * returned in .num).
+ *
+ * Returns 0 on success or a negative errno on failure.
+ */
+static long vhost_vdpa_vring_ioctl_batch(struct vhost_vdpa *v, unsigned int cmd,
+					 void __user *argp)
+{
+	struct vhost_vring_state __user *ustates = argp;
+	struct vdpa_device *vdpa = v->vdpa;
+	const struct vdpa_config_ops *ops = vdpa->config;
+	struct vhost_vring_state *states;
+	u32 idx, num, i;
+	long r = 0;
+
+	if (get_user(num, &ustates->num))
+		return -EFAULT;
+
+	/* Validate the count; don't silently clamp it with nospec. */
+	if (!num || num > v->nvqs)
+		return -EINVAL;
+
+	/* num is user-controlled: a VLA here could exhaust the kernel stack. */
+	states = kmalloc_array(num + 1, sizeof(*states), GFP_KERNEL);
+	if (!states)
+		return -ENOMEM;
+
+	if (copy_from_user(states, argp, (num + 1) * sizeof(*states))) {
+		r = -EFAULT;
+		goto out;
+	}
+
+	switch (cmd) {
+	case VHOST_VDPA_SET_VRING_ENABLE_BATCH:
+		for (i = 1; i <= num; i++) {
+			idx = states[i].index;
+			if (idx >= v->nvqs) {
+				r = -ENOBUFS;
+				goto out;
+			}
+			idx = array_index_nospec(idx, v->nvqs);
+			ops->set_vq_ready(vdpa, idx, 1);
+		}
+		break;
+	case VHOST_VDPA_GET_VRING_GROUP_BATCH:
+		if (!ops->get_vq_group) {
+			r = -EOPNOTSUPP;
+			goto out;
+		}
+		for (i = 1; i <= num; i++) {
+			idx = states[i].index;
+			if (idx >= v->nvqs) {
+				r = -ENOBUFS;
+				goto out;
+			}
+			idx = array_index_nospec(idx, v->nvqs);
+			states[i].num = ops->get_vq_group(vdpa, idx);
+			if (states[i].num >= vdpa->ngroups) {
+				r = -EIO;
+				goto out;
+			}
+		}
+		if (copy_to_user(argp, states, (num + 1) * sizeof(*states)))
+			r = -EFAULT;
+		break;
+	default:
+		/* Must be negative: bare ENOIOCTLCMD would read as success. */
+		r = -ENOIOCTLCMD;
+	}
+out:
+	kfree(states);
+	return r;
+}
+
 static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
 				   void __user *argp)
 {
@@ -533,6 +596,13 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
 	u32 idx;
 	long r;
 
+	/* Batch ioctls carry a count, not a vring index: dispatch early. */
+	switch (cmd) {
+	case VHOST_VDPA_SET_VRING_ENABLE_BATCH:
+	case VHOST_VDPA_GET_VRING_GROUP_BATCH:
+		return vhost_vdpa_vring_ioctl_batch(v, cmd, argp);
+	}
+
r = get_user(idx, (u32 __user *)argp);
if (r < 0)
return r;
@@ -630,7 +700,8 @@ static long vhost_vdpa_unlocked_ioctl(struct file *filep,
return -EFAULT;
if (features & ~(VHOST_VDPA_BACKEND_FEATURES |
BIT_ULL(VHOST_BACKEND_F_SUSPEND) |
- BIT_ULL(VHOST_BACKEND_F_RESUME)))
+ BIT_ULL(VHOST_BACKEND_F_RESUME) |
+ BIT_ULL(VHOST_BACKEND_F_IOCTL_BATCH)))
return -EOPNOTSUPP;
if ((features & BIT_ULL(VHOST_BACKEND_F_SUSPEND)) &&
!vhost_vdpa_can_suspend(v))
@@ -638,6 +709,7 @@ static long vhost_vdpa_unlocked_ioctl(struct file *filep,
if ((features & BIT_ULL(VHOST_BACKEND_F_RESUME)) &&
!vhost_vdpa_can_resume(v))
return -EOPNOTSUPP;
+
vhost_set_backend_features(&v->vdev, features);
return 0;
}
@@ -691,6 +763,7 @@ static long vhost_vdpa_unlocked_ioctl(struct file *filep,
features |= BIT_ULL(VHOST_BACKEND_F_SUSPEND);
if (vhost_vdpa_can_resume(v))
features |= BIT_ULL(VHOST_BACKEND_F_RESUME);
+ features |= BIT_ULL(VHOST_BACKEND_F_IOCTL_BATCH);
if (copy_to_user(featurep, &features, sizeof(features)))
r = -EFAULT;
break;
diff --git a/include/uapi/linux/vhost.h b/include/uapi/linux/vhost.h
index 92e1b700b51c..edb8cc1b22c9 100644
--- a/include/uapi/linux/vhost.h
+++ b/include/uapi/linux/vhost.h
@@ -188,4 +188,17 @@
  */
 #define VHOST_VDPA_RESUME _IO(VHOST_VIRTIO, 0x7E)
 
+/* Batch-enable vrings. The argument is an array of vhost_vring_state:
+ * element 0's .num holds the number of entries that follow; each
+ * following entry's .index names a vring to enable.
+ */
+#define VHOST_VDPA_SET_VRING_ENABLE_BATCH	_IOW(VHOST_VIRTIO, 0x7F, \
+						     struct vhost_vring_state)
+
+/* Batch-query vring groups. Same array layout as above; each entry's
+ * group is returned in its .num field.
+ */
+#define VHOST_VDPA_GET_VRING_GROUP_BATCH	_IOWR(VHOST_VIRTIO, 0x82, \
+						      struct vhost_vring_state)
+
 #endif
diff --git a/include/uapi/linux/vhost_types.h b/include/uapi/linux/vhost_types.h
index c5690a8992d8..ea232fbd436a 100644
--- a/include/uapi/linux/vhost_types.h
+++ b/include/uapi/linux/vhost_types.h
@@ -166,4 +166,6 @@ struct vhost_vdpa_iova_range {
 /* Device can be resumed */
 #define VHOST_BACKEND_F_RESUME 0x5
 
+/* Device supports batched vring ioctls */
+#define VHOST_BACKEND_F_IOCTL_BATCH 0x6
 #endif
diff --git a/tools/include/uapi/linux/vhost.h b/tools/include/uapi/linux/vhost.h
index 92e1b700b51c..d0ce141688ba 100644
--- a/tools/include/uapi/linux/vhost.h
+++ b/tools/include/uapi/linux/vhost.h
@@ -188,4 +188,17 @@
  */
 #define VHOST_VDPA_RESUME _IO(VHOST_VIRTIO, 0x7E)
 
+/* Batch-enable vrings. The argument is an array of vhost_vring_state:
+ * element 0's .num holds the number of entries that follow; each
+ * following entry's .index names a vring to enable.
+ */
+#define VHOST_VDPA_SET_VRING_ENABLE_BATCH	_IOW(VHOST_VIRTIO, 0x7F, \
+						     struct vhost_vring_state)
+
+/* Batch-query vring groups. Same array layout as above; each entry's
+ * group is returned in its .num field.
+ */
+#define VHOST_VDPA_GET_VRING_GROUP_BATCH	_IOWR(VHOST_VIRTIO, 0x82, \
+						      struct vhost_vring_state)
+
 #endif
--
2.25.1