[PATCH 5.12 011/292] iommu/vt-d: Global devTLB flush when present context entry changed

From: Greg Kroah-Hartman
Date: Mon Jul 19 2021 - 13:59:36 EST


From: Sanjay Kumar <sanjay.k.kumar@xxxxxxxxx>

commit 37764b952e1b39053defc7ebe5dcd8c4e3e78de9 upstream.

This fixes a bug in the context cache clear operation. The code did not
follow the correct invalidation flow: a global device TLB invalidation
must be issued after the IOTLB invalidation. In addition, the code took
the domain ID from the context entry, but in scalable mode the domain ID
lives in the PASID table entry, not in the context entry.
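
For readability, here is a condensed sketch of the corrected teardown flow
that the hunks below implement, pieced together from the diff context. It is
not a standalone, buildable unit: it assumes the kernel-internal helpers of
the 5.12 intel-iommu driver (iommu_context_addr(), context_clear_entry(),
the iommu->flush callbacks, __iommu_flush_dev_iotlb()), and the exact
flush_context()/flush_iotlb() argument lists are taken as they appear in
that tree, not from the hunks themselves.

/* Sketch only: mirrors domain_context_clear_one() after this patch. */
static void domain_context_clear_one(struct device_domain_info *info,
				     u8 bus, u8 devfn)
{
	struct intel_iommu *iommu = info->iommu;
	struct context_entry *context;
	unsigned long flags;
	u16 did_old;

	if (!iommu)
		return;

	spin_lock_irqsave(&iommu->lock, flags);
	context = iommu_context_addr(iommu, bus, devfn, 0);
	if (!context) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		return;
	}

	/*
	 * In scalable mode the DID is stored in the PASID table entry,
	 * so take it from the attached domain rather than the context
	 * entry (FLPT_DEFAULT_DID for pass-through identity domains).
	 */
	if (sm_supported(iommu)) {
		if (hw_pass_through && domain_type_is_si(info->domain))
			did_old = FLPT_DEFAULT_DID;
		else
			did_old = info->domain->iommu_did[iommu->seq_id];
	} else {
		did_old = context_domain_id(context);
	}

	context_clear_entry(context);
	__iommu_flush_cache(iommu, context, sizeof(*context));
	spin_unlock_irqrestore(&iommu->lock, flags);

	/* Invalidate the context cache, then the IOTLB for this DID ... */
	iommu->flush.flush_context(iommu, did_old,
				   (((u16)bus) << 8) | devfn,
				   DMA_CCMD_MASK_NOBIT, DMA_CCMD_DEVICE_INVL);
	iommu->flush.flush_iotlb(iommu, did_old, 0, 0, DMA_TLB_DSI_FLUSH);

	/* ... and finish with the global device TLB flush added by this fix. */
	__iommu_flush_dev_iotlb(info, 0, MAX_AGAW_PFN_WIDTH);
}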

Fixes: 7373a8cc38197 ("iommu/vt-d: Setup context and enable RID2PASID support")
Cc: stable@xxxxxxxxxxxxxxx # v5.0+
Signed-off-by: Sanjay Kumar <sanjay.k.kumar@xxxxxxxxx>
Signed-off-by: Lu Baolu <baolu.lu@xxxxxxxxxxxxxxx>
Link: https://lore.kernel.org/r/20210712071315.3416543-1-baolu.lu@xxxxxxxxxxxxxxx
Signed-off-by: Joerg Roedel <jroedel@xxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
---
drivers/iommu/intel/iommu.c | 31 ++++++++++++++++++++++---------
1 file changed, 22 insertions(+), 9 deletions(-)

--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -2434,10 +2434,11 @@ __domain_mapping(struct dmar_domain *dom
return 0;
}

-static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn)
+static void domain_context_clear_one(struct device_domain_info *info, u8 bus, u8 devfn)
{
- unsigned long flags;
+ struct intel_iommu *iommu = info->iommu;
struct context_entry *context;
+ unsigned long flags;
u16 did_old;

if (!iommu)
@@ -2449,7 +2450,16 @@ static void domain_context_clear_one(str
spin_unlock_irqrestore(&iommu->lock, flags);
return;
}
- did_old = context_domain_id(context);
+
+ if (sm_supported(iommu)) {
+ if (hw_pass_through && domain_type_is_si(info->domain))
+ did_old = FLPT_DEFAULT_DID;
+ else
+ did_old = info->domain->iommu_did[iommu->seq_id];
+ } else {
+ did_old = context_domain_id(context);
+ }
+
context_clear_entry(context);
__iommu_flush_cache(iommu, context, sizeof(*context));
spin_unlock_irqrestore(&iommu->lock, flags);
@@ -2467,6 +2477,8 @@ static void domain_context_clear_one(str
0,
0,
DMA_TLB_DSI_FLUSH);
+
+ __iommu_flush_dev_iotlb(info, 0, MAX_AGAW_PFN_WIDTH);
}

static inline void unlink_domain_info(struct device_domain_info *info)
@@ -4456,9 +4468,9 @@ out_free_dmar:

static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *opaque)
{
- struct intel_iommu *iommu = opaque;
+ struct device_domain_info *info = opaque;

- domain_context_clear_one(iommu, PCI_BUS_NUM(alias), alias & 0xff);
+ domain_context_clear_one(info, PCI_BUS_NUM(alias), alias & 0xff);
return 0;
}

@@ -4468,12 +4480,13 @@ static int domain_context_clear_one_cb(s
* devices, unbinding the driver from any one of them will possibly leave
* the others unable to operate.
*/
-static void domain_context_clear(struct intel_iommu *iommu, struct device *dev)
+static void domain_context_clear(struct device_domain_info *info)
{
- if (!iommu || !dev || !dev_is_pci(dev))
+ if (!info->iommu || !info->dev || !dev_is_pci(info->dev))
return;

- pci_for_each_dma_alias(to_pci_dev(dev), &domain_context_clear_one_cb, iommu);
+ pci_for_each_dma_alias(to_pci_dev(info->dev),
+ &domain_context_clear_one_cb, info);
}

static void __dmar_remove_one_dev_info(struct device_domain_info *info)
@@ -4497,7 +4510,7 @@ static void __dmar_remove_one_dev_info(s

iommu_disable_dev_iotlb(info);
if (!dev_is_real_dma_subdevice(info->dev))
- domain_context_clear(iommu, info->dev);
+ domain_context_clear(info);
intel_pasid_free_table(info->dev);
}