[PATCH v3 07/11] iommu: Prepare for separating SVA and IOPF

From: Lu Baolu
Date: Thu Aug 17 2023 - 19:45:00 EST


Move the iopf_group data structure to iommu.h so that it represents the
minimal set of faults that a domain's page fault handler should handle.

Add two new helpers for the domain's page fault handler:
- iopf_free_group: free a fault group after all faults in the group have
  been handled.
- iopf_queue_work: initialize a fault group's work with the given work
  function and queue it on the device's fault queue.

This will simplify the subsequent patches.
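
As a rough sketch of how a domain's page fault handler is expected to
consume these helpers in later patches (example_iopf_work() and
example_get_domain() below are placeholders, not part of this series;
in this patch the helpers are still static to io-pgfault.c):

static void example_iopf_work(struct work_struct *work)
{
	struct iopf_group *group = container_of(work, struct iopf_group, work);
	struct iommu_domain *domain = example_get_domain(group->dev);
	enum iommu_page_response_code status = IOMMU_PAGE_RESP_SUCCESS;
	struct iopf_fault *iopf;

	/* Resolve each fault in the group; errors stay sticky. */
	list_for_each_entry(iopf, &group->faults, list) {
		if (status == IOMMU_PAGE_RESP_SUCCESS)
			status = domain->iopf_handler(&iopf->fault,
						      domain->fault_data);
	}

	/* Respond once for the whole group, then release it. */
	iopf_complete_group(group->dev, &group->last_fault, status);
	iopf_free_group(group);
}

Freeing every fault in iopf_free_group() keeps the handler loop free of
kfree() calls, which is why the loop can switch from
list_for_each_entry_safe() to list_for_each_entry().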

Signed-off-by: Lu Baolu <baolu.lu@xxxxxxxxxxxxxxx>
---
include/linux/iommu.h | 12 ++++++++++
drivers/iommu/io-pgfault.c | 49 ++++++++++++++++++++++----------------
2 files changed, 41 insertions(+), 20 deletions(-)

diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 8243d72098ea..ff292eea9d31 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -516,6 +516,18 @@ struct dev_iommu {
 	u32 require_direct:1;
 };
 
+struct iopf_fault {
+	struct iommu_fault fault;
+	struct list_head list;
+};
+
+struct iopf_group {
+	struct iopf_fault last_fault;
+	struct list_head faults;
+	struct work_struct work;
+	struct device *dev;
+};
+
 int iommu_device_register(struct iommu_device *iommu,
 			  const struct iommu_ops *ops,
 			  struct device *hwdev);
diff --git a/drivers/iommu/io-pgfault.c b/drivers/iommu/io-pgfault.c
index 31832aeacdba..d07586cd37fd 100644
--- a/drivers/iommu/io-pgfault.c
+++ b/drivers/iommu/io-pgfault.c
@@ -25,17 +25,17 @@ struct iopf_queue {
 	struct mutex lock;
 };
 
-struct iopf_fault {
-	struct iommu_fault fault;
-	struct list_head list;
-};
+static void iopf_free_group(struct iopf_group *group)
+{
+	struct iopf_fault *iopf, *next;
 
-struct iopf_group {
-	struct iopf_fault last_fault;
-	struct list_head faults;
-	struct work_struct work;
-	struct device *dev;
-};
+	list_for_each_entry_safe(iopf, next, &group->faults, list) {
+		if (!(iopf->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE))
+			kfree(iopf);
+	}
+
+	kfree(group);
+}
 
 static int iopf_complete_group(struct device *dev, struct iopf_fault *iopf,
 			       enum iommu_page_response_code status)
@@ -55,9 +55,9 @@ static int iopf_complete_group(struct device *dev, struct iopf_fault *iopf,

 static void iopf_handler(struct work_struct *work)
 {
+	struct iopf_fault *iopf;
 	struct iopf_group *group;
 	struct iommu_domain *domain;
-	struct iopf_fault *iopf, *next;
 	enum iommu_page_response_code status = IOMMU_PAGE_RESP_SUCCESS;
 
 	group = container_of(work, struct iopf_group, work);
@@ -66,7 +66,7 @@ static void iopf_handler(struct work_struct *work)
 	if (!domain || !domain->iopf_handler)
 		status = IOMMU_PAGE_RESP_INVALID;
 
-	list_for_each_entry_safe(iopf, next, &group->faults, list) {
+	list_for_each_entry(iopf, &group->faults, list) {
 		/*
 		 * For the moment, errors are sticky: don't handle subsequent
 		 * faults in the group if there is an error.
@@ -74,14 +74,21 @@ static void iopf_handler(struct work_struct *work)
 		if (status == IOMMU_PAGE_RESP_SUCCESS)
 			status = domain->iopf_handler(&iopf->fault,
 						      domain->fault_data);
-
-		if (!(iopf->fault.prm.flags &
-		      IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE))
-			kfree(iopf);
 	}
 
 	iopf_complete_group(group->dev, &group->last_fault, status);
-	kfree(group);
+	iopf_free_group(group);
+}
+
+static int iopf_queue_work(struct iopf_group *group, work_func_t func)
+{
+	struct iommu_fault_param *fault_param = group->dev->iommu->fault_param;
+
+	INIT_WORK(&group->work, func);
+	if (!queue_work(fault_param->queue->wq, &group->work))
+		return -EBUSY;
+
+	return 0;
 }

/**
@@ -174,7 +181,6 @@ int iommu_queue_iopf(struct iommu_fault *fault, struct device *dev)
 	group->last_fault.fault = *fault;
 	INIT_LIST_HEAD(&group->faults);
 	list_add(&group->last_fault.list, &group->faults);
-	INIT_WORK(&group->work, iopf_handler);
 
 	/* See if we have partial faults for this group */
 	list_for_each_entry_safe(iopf, next, &iopf_param->partial, list) {
@@ -183,8 +189,11 @@ int iommu_queue_iopf(struct iommu_fault *fault, struct device *dev)
 			list_move(&iopf->list, &group->faults);
 	}
 
-	queue_work(iopf_param->queue->wq, &group->work);
-	return 0;
+	ret = iopf_queue_work(group, iopf_handler);
+	if (ret)
+		iopf_free_group(group);
+
+	return ret;
 
 cleanup_partial:
 	list_for_each_entry_safe(iopf, next, &iopf_param->partial, list) {
--
2.34.1