[PATCH v4 6/8] swiotlb: determine potential physical address limit

From: Petr Tesarik
Date: Thu Jul 13 2023 - 11:26:40 EST


From: Petr Tesarik <petr.tesarik.ext@xxxxxxxxxx>

The value returned by default_swiotlb_limit() should be constant, because
callers rely on it to decide whether DMA can be used at all. To allow
allocating memory pools on the fly, return the maximum possible physical
address rather than the highest address used by the default pool.

For swiotlb_init_remap(), this is the highest directly mapped physical
address if the initialization flags include SWIOTLB_ANY, otherwise the
arch-specific limit used by memblock_alloc_low(). For swiotlb_init_late(),
the highest address is determined by the GFP flags.
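
For example, assuming a platform with CONFIG_ZONE_DMA32 enabled, a late
initialization such as the (hypothetical) call below would end up with a
limit of DMA_BIT_MASK(32):

	/* Size value is made up; no remap callback is needed here. */
	rc = swiotlb_init_late(64UL << 20, GFP_DMA32, NULL);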

Signed-off-by: Petr Tesarik <petr.tesarik.ext@xxxxxxxxxx>
---
 include/linux/swiotlb.h |  2 ++
 kernel/dma/swiotlb.c    | 12 +++++++++++-
 2 files changed, 13 insertions(+), 1 deletion(-)

diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index b642e7739604..ff8f5150f4de 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -105,6 +105,7 @@ struct io_tlb_pool {
* struct io_tlb_mem - Software IO TLB allocator
* @pool: IO TLB memory pool descriptor.
* @nslabs: Total number of IO TLB slabs in all pools.
+ * @phys_limit: Maximum allowed physical address.
* @debugfs: The dentry to debugfs.
* @force_bounce: %true if swiotlb bouncing is forced
* @for_alloc: %true if the pool is used for memory allocation
@@ -118,6 +119,7 @@ struct io_tlb_pool {
struct io_tlb_mem {
struct io_tlb_pool *pool;
unsigned long nslabs;
+ u64 phys_limit;
struct dentry *debugfs;
bool force_bounce;
bool for_alloc;
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 6ec5a81acc2a..d6a05727efc5 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -334,6 +334,10 @@ void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
io_tlb_default_mem.force_bounce =
swiotlb_force_bounce || (flags & SWIOTLB_FORCE);
io_tlb_default_mem.can_grow = !remap;
+ if (flags & SWIOTLB_ANY)
+ io_tlb_default_mem.phys_limit = virt_to_phys(high_memory - 1);
+ else
+ io_tlb_default_mem.phys_limit = ARCH_LOW_ADDRESS_LIMIT;

if (!default_nareas)
swiotlb_adjust_nareas(num_possible_cpus());
@@ -402,6 +406,12 @@ int swiotlb_init_late(size_t size, gfp_t gfp_mask,

io_tlb_default_mem.force_bounce = swiotlb_force_bounce;
io_tlb_default_mem.can_grow = !remap;
+ if (IS_ENABLED(CONFIG_ZONE_DMA) && (gfp_mask & __GFP_DMA))
+ io_tlb_default_mem.phys_limit = DMA_BIT_MASK(zone_dma_bits);
+ else if (IS_ENABLED(CONFIG_ZONE_DMA32) && (gfp_mask & __GFP_DMA32))
+ io_tlb_default_mem.phys_limit = DMA_BIT_MASK(32);
+ else
+ io_tlb_default_mem.phys_limit = virt_to_phys(high_memory - 1);

if (!default_nareas)
swiotlb_adjust_nareas(num_possible_cpus());
@@ -1338,7 +1348,7 @@ phys_addr_t default_swiotlb_start(void)
*/
phys_addr_t default_swiotlb_limit(void)
{
- return io_tlb_default_pool.end - 1;
+ return io_tlb_default_mem.phys_limit;
}

#ifdef CONFIG_DEBUG_FS
--
2.25.1