[RFC V1 2/5] swiotlb: Allow setting up default alignment of SWIOTLB region

From: Vishal Annapurve
Date: Fri Jan 12 2024 - 00:53:35 EST


Allow adjusting alignment of SWIOTLB memory. CVMs can use this framework
to align the shared memory regions as needed.

Signed-off-by: Vishal Annapurve <vannapurve@xxxxxxxxxx>
---
include/linux/swiotlb.h | 5 +++++
kernel/dma/swiotlb.c | 12 +++++++++---
2 files changed, 14 insertions(+), 3 deletions(-)

diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index 058901313405..450bd82cdb9f 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -206,6 +206,7 @@ size_t swiotlb_max_mapping_size(struct device *dev);
bool is_swiotlb_allocated(void);
bool is_swiotlb_active(struct device *dev);
void __init swiotlb_adjust_size(unsigned long size);
+void __init swiotlb_adjust_alignment(unsigned long alignment);
phys_addr_t default_swiotlb_base(void);
phys_addr_t default_swiotlb_limit(void);
#else
@@ -247,6 +248,10 @@ static inline void swiotlb_adjust_size(unsigned long size)
{
}

+static inline void swiotlb_adjust_alignment(unsigned long alignment)
+{
+}
+
static inline phys_addr_t default_swiotlb_base(void)
{
return 0;
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index a056d2f8b9ee..eeab0607a028 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -97,6 +97,7 @@ static struct io_tlb_mem io_tlb_default_mem;
#endif /* CONFIG_SWIOTLB_DYNAMIC */

static unsigned long default_nslabs = IO_TLB_DEFAULT_SIZE >> IO_TLB_SHIFT;
+static unsigned long default_alignment = PAGE_SIZE;
static unsigned long default_nareas;

/**
@@ -223,6 +224,11 @@ void __init swiotlb_adjust_size(unsigned long size)
pr_info("SWIOTLB bounce buffer size adjusted to %luMB", size >> 20);
}

+void __init swiotlb_adjust_alignment(unsigned long alignment)
+{
+ default_alignment = alignment;
+}
+
void swiotlb_print_info(void)
{
struct io_tlb_pool *mem = &io_tlb_default_mem.defpool;
@@ -315,7 +321,7 @@ static void __init *swiotlb_memblock_alloc(unsigned long nslabs,
unsigned int flags,
int (*remap)(void *tlb, unsigned long nslabs))
{
- size_t bytes = PAGE_ALIGN(nslabs << IO_TLB_SHIFT);
+ size_t bytes = ALIGN(nslabs << IO_TLB_SHIFT, default_alignment);
void *tlb;

/*
@@ -324,9 +330,9 @@ static void __init *swiotlb_memblock_alloc(unsigned long nslabs,
* memory encryption.
*/
if (flags & SWIOTLB_ANY)
- tlb = memblock_alloc(bytes, PAGE_SIZE);
+ tlb = memblock_alloc(bytes, default_alignment);
else
- tlb = memblock_alloc_low(bytes, PAGE_SIZE);
+ tlb = memblock_alloc_low(bytes, default_alignment);

if (!tlb) {
pr_warn("%s: Failed to allocate %zu bytes tlb structure\n",
--
2.43.0.275.g3460e3d667-goog