[PATCH v3 2/8] iommu/sva: Use iopf domain attach/detach interface
From: Lu Baolu
Date: Mon Jan 22 2024 - 02:45:19 EST
The iommu sva implementation relies on iopf handling. Allocate an
attachment cookie and use the iopf domain attach/detach interface.
This guarantees that the SVA domain is not released until all
outstanding page faults have been handled.
In the fault delivery path, the attachment cookie is retrieved instead
of the domain. This ensures that a page fault is forwarded only if an
iopf-capable domain is attached, and that the domain is not released
until all outstanding faults have been handled.
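For reference, the cookie life cycle this patch builds on is roughly as
follows. The structure layout is an illustrative sketch based on the
iopf domain attach/detach interface added earlier in this series; apart
from 'domain' and 'release' (the only members dereferenced below), the
fields shown are assumptions, not the authoritative definition.

/*
 * Sketch only -- the real definition comes from the iopf domain
 * attach/detach interface patch earlier in this series.
 */
struct iopf_attach_cookie {
	struct iommu_domain *domain;	/* iopf-capable domain */
	struct device *dev;		/* assumed */
	ioasid_t pasid;			/* assumed */
	void (*release)(struct iopf_attach_cookie *cookie);
};

/*
 * Cookie life cycle with this patch applied:
 *
 *   iommu_sva_bind_device()
 *     sva_attach_device_pasid()
 *       iopf_domain_attach(domain, dev, pasid, cookie)
 *
 *   iommu_report_device_fault()
 *     iopf_group_alloc()
 *       group->cookie = iopf_pasid_cookie_get(dev, pasid)   <- fault path
 *
 *   iopf_free_group()
 *     iopf_pasid_cookie_put(group->cookie)                   <- fault done
 *
 *   iommu_sva_unbind_device()
 *     iopf_domain_detach(domain, dev, pasid)
 *       ... last reference dropped -> cookie->release() ==
 *           release_attach_cookie() frees the SVA domain
 */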
Signed-off-by: Lu Baolu <baolu.lu@xxxxxxxxxxxxxxx>
---
include/linux/iommu.h | 2 +-
drivers/iommu/io-pgfault.c | 59 +++++++++++++++++++-------------------
drivers/iommu/iommu-sva.c | 48 ++++++++++++++++++++++++-------
3 files changed, 68 insertions(+), 41 deletions(-)
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 6d85be23952a..511dc7b4bdb2 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -142,9 +142,9 @@ struct iopf_group {
/* list node for iommu_fault_param::faults */
struct list_head pending_node;
struct work_struct work;
- struct iommu_domain *domain;
/* The device's fault data parameter. */
struct iommu_fault_param *fault_param;
+ struct iopf_attach_cookie *cookie;
};
/**
diff --git a/drivers/iommu/io-pgfault.c b/drivers/iommu/io-pgfault.c
index f7ce41573799..2567d8c04e46 100644
--- a/drivers/iommu/io-pgfault.c
+++ b/drivers/iommu/io-pgfault.c
@@ -40,7 +40,7 @@ static void iopf_put_dev_fault_param(struct iommu_fault_param *fault_param)
}
/* Get the domain attachment cookie for pasid of a device. */
-static struct iopf_attach_cookie __maybe_unused *
+static struct iopf_attach_cookie *
iopf_pasid_cookie_get(struct device *dev, ioasid_t pasid)
{
struct iommu_fault_param *iopf_param = iopf_get_dev_fault_param(dev);
@@ -147,6 +147,7 @@ static void __iopf_free_group(struct iopf_group *group)
/* Pair with iommu_report_device_fault(). */
iopf_put_dev_fault_param(group->fault_param);
+ iopf_pasid_cookie_put(group->cookie);
}
void iopf_free_group(struct iopf_group *group)
@@ -156,30 +157,6 @@ void iopf_free_group(struct iopf_group *group)
}
EXPORT_SYMBOL_GPL(iopf_free_group);
-static struct iommu_domain *get_domain_for_iopf(struct device *dev,
- struct iommu_fault *fault)
-{
- struct iommu_domain *domain;
-
- if (fault->prm.flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID) {
- domain = iommu_get_domain_for_dev_pasid(dev, fault->prm.pasid, 0);
- if (IS_ERR(domain))
- domain = NULL;
- } else {
- domain = iommu_get_domain_for_dev(dev);
- }
-
- if (!domain || !domain->iopf_handler) {
- dev_warn_ratelimited(dev,
- "iopf (pasid %d) without domain attached or handler installed\n",
- fault->prm.pasid);
-
- return NULL;
- }
-
- return domain;
-}
-
/* Non-last request of a group. Postpone until the last one. */
static int report_partial_fault(struct iommu_fault_param *fault_param,
struct iommu_fault *fault)
@@ -199,10 +176,20 @@ static int report_partial_fault(struct iommu_fault_param *fault_param,
return 0;
}
+static ioasid_t fault_to_pasid(struct iommu_fault *fault)
+{
+ if (fault->prm.flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID)
+ return fault->prm.pasid;
+
+ return IOMMU_NO_PASID;
+}
+
static struct iopf_group *iopf_group_alloc(struct iommu_fault_param *iopf_param,
struct iopf_fault *evt,
struct iopf_group *abort_group)
{
+ ioasid_t pasid = fault_to_pasid(&evt->fault);
+ struct iopf_attach_cookie *cookie;
struct iopf_fault *iopf, *next;
struct iopf_group *group;
@@ -215,7 +202,23 @@ static struct iopf_group *iopf_group_alloc(struct iommu_fault_param *iopf_param,
group = abort_group;
}
+ cookie = iopf_pasid_cookie_get(iopf_param->dev, pasid);
+ if (!cookie && pasid != IOMMU_NO_PASID)
+ cookie = iopf_pasid_cookie_get(iopf_param->dev, IOMMU_NO_PASID);
+ if (IS_ERR(cookie) || !cookie) {
+ /*
+ * The PASID of this device was not attached by an iopf-capable
+ * domain. Ask the caller to abort handling of this fault.
+ * Otherwise, the cookie reference is transferred to the new
+ * iopf group and will be released in iopf_free_group().
+ */
+ kfree(group);
+ group = abort_group;
+ cookie = NULL;
+ }
+
group->fault_param = iopf_param;
+ group->cookie = cookie;
group->last_fault.fault = evt->fault;
INIT_LIST_HEAD(&group->faults);
INIT_LIST_HEAD(&group->pending_node);
@@ -305,15 +308,11 @@ void iommu_report_device_fault(struct device *dev, struct iopf_fault *evt)
if (group == &abort_group)
goto err_abort;
- group->domain = get_domain_for_iopf(dev, fault);
- if (!group->domain)
- goto err_abort;
-
/*
* On success iopf_handler must call iopf_group_response() and
* iopf_free_group()
*/
- if (group->domain->iopf_handler(group))
+ if (group->cookie->domain->iopf_handler(group))
goto err_abort;
return;
diff --git a/drivers/iommu/iommu-sva.c b/drivers/iommu/iommu-sva.c
index b51995b4fe90..fff3ee1ee9ce 100644
--- a/drivers/iommu/iommu-sva.c
+++ b/drivers/iommu/iommu-sva.c
@@ -50,6 +50,39 @@ static struct iommu_mm_data *iommu_alloc_mm_data(struct mm_struct *mm, struct de
return iommu_mm;
}
+static void release_attach_cookie(struct iopf_attach_cookie *cookie)
+{
+ struct iommu_domain *domain = cookie->domain;
+
+ mutex_lock(&iommu_sva_lock);
+ if (--domain->users == 0) {
+ list_del(&domain->next);
+ iommu_domain_free(domain);
+ }
+ mutex_unlock(&iommu_sva_lock);
+
+ kfree(cookie);
+}
+
+static int sva_attach_device_pasid(struct iommu_domain *domain,
+ struct device *dev, ioasid_t pasid)
+{
+ struct iopf_attach_cookie *cookie;
+ int ret;
+
+ cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
+ if (!cookie)
+ return -ENOMEM;
+
+ cookie->release = release_attach_cookie;
+
+ ret = iopf_domain_attach(domain, dev, pasid, cookie);
+ if (ret)
+ kfree(cookie);
+
+ return ret;
+}
+
/**
* iommu_sva_bind_device() - Bind a process address space to a device
* @dev: the device
@@ -90,7 +123,7 @@ struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm
/* Search for an existing domain. */
list_for_each_entry(domain, &mm->iommu_mm->sva_domains, next) {
- ret = iommu_attach_device_pasid(domain, dev, iommu_mm->pasid);
+ ret = sva_attach_device_pasid(domain, dev, iommu_mm->pasid);
if (!ret) {
domain->users++;
goto out;
@@ -104,7 +137,7 @@ struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm
goto out_free_handle;
}
- ret = iommu_attach_device_pasid(domain, dev, iommu_mm->pasid);
+ ret = sva_attach_device_pasid(domain, dev, iommu_mm->pasid);
if (ret)
goto out_free_domain;
domain->users = 1;
@@ -140,13 +173,7 @@ void iommu_sva_unbind_device(struct iommu_sva *handle)
struct iommu_mm_data *iommu_mm = domain->mm->iommu_mm;
struct device *dev = handle->dev;
- mutex_lock(&iommu_sva_lock);
- iommu_detach_device_pasid(domain, dev, iommu_mm->pasid);
- if (--domain->users == 0) {
- list_del(&domain->next);
- iommu_domain_free(domain);
- }
- mutex_unlock(&iommu_sva_lock);
+ iopf_domain_detach(domain, dev, iommu_mm->pasid);
kfree(handle);
}
EXPORT_SYMBOL_GPL(iommu_sva_unbind_device);
@@ -242,7 +269,8 @@ static void iommu_sva_handle_iopf(struct work_struct *work)
if (status != IOMMU_PAGE_RESP_SUCCESS)
break;
- status = iommu_sva_handle_mm(&iopf->fault, group->domain->mm);
+ status = iommu_sva_handle_mm(&iopf->fault,
+ group->cookie->domain->mm);
}
iopf_group_response(group, status);
--
2.34.1