From 6c385c1fa0a7fc767138a7bb39603966d1519c57 Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Mon, 17 Jun 2024 11:23:14 -0300
Subject: [PATCH] swiotlb: Reinstate page-alignment for mappings >= PAGE_SIZE

commit 14cebf689a78e8a1c041138af221ef6eac6bc7da upstream.

For swiotlb allocations >= PAGE_SIZE, the slab search historically
adjusted the stride to avoid checking unaligned slots. This had the
side-effect of aligning large mapping requests to PAGE_SIZE, but that
was broken by 0eee5ae10256 ("swiotlb: fix slot alignment checks").

Since this alignment could be relied upon by drivers, reinstate
PAGE_SIZE alignment for swiotlb mappings >= PAGE_SIZE.

Cc: stable@vger.kernel.org # v6.6+
Reported-by: Michael Kelley
Signed-off-by: Will Deacon
Reviewed-by: Robin Murphy
Reviewed-by: Petr Tesarik
Tested-by: Nicolin Chen
Tested-by: Michael Kelley
Signed-off-by: Christoph Hellwig
Signed-off-by: Fabio Estevam
Signed-off-by: Greg Kroah-Hartman
---
 kernel/dma/swiotlb.c | 18 +++++++++++-------
 1 file changed, 11 insertions(+), 7 deletions(-)

diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 4c10700c61d2..0dc3ec199fe4 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -992,6 +992,17 @@ static int swiotlb_area_find_slots(struct device *dev, struct io_tlb_pool *pool,
 	BUG_ON(!nslots);
 	BUG_ON(area_index >= pool->nareas);
 
+	/*
+	 * Historically, swiotlb allocations >= PAGE_SIZE were guaranteed to be
+	 * page-aligned in the absence of any other alignment requirements.
+	 * 'alloc_align_mask' was later introduced to specify the alignment
+	 * explicitly, however this is passed as zero for streaming mappings
+	 * and so we preserve the old behaviour there in case any drivers are
+	 * relying on it.
+	 */
+	if (!alloc_align_mask && !iotlb_align_mask && alloc_size >= PAGE_SIZE)
+		alloc_align_mask = PAGE_SIZE - 1;
+
 	/*
 	 * Ensure that the allocation is at least slot-aligned and update
 	 * 'iotlb_align_mask' to ignore bits that will be preserved when
@@ -1006,13 +1017,6 @@ static int swiotlb_area_find_slots(struct device *dev, struct io_tlb_pool *pool,
 	 */
 	stride = get_max_slots(max(alloc_align_mask, iotlb_align_mask));
 
-	/*
-	 * For allocations of PAGE_SIZE or larger only look for page aligned
-	 * allocations.
-	 */
-	if (alloc_size >= PAGE_SIZE)
-		stride = umax(stride, PAGE_SHIFT - IO_TLB_SHIFT + 1);
-
 	spin_lock_irqsave(&area->lock, flags);
 	if (unlikely(nslots > pool->area_nslabs - area->used))
 		goto not_found;
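
[Editor's note, not part of the patch: the sketch below is a minimal
user-space illustration of the stride arithmetic, showing how setting
alloc_align_mask to PAGE_SIZE - 1 reproduces the page-aligned search
stride that the removed hunk enforced. The constants and helper names
mirror kernel/dma/swiotlb.c but are redefined here so the example
compiles standalone; 4 KiB pages and 2 KiB swiotlb slots are assumed.]

/*
 * Standalone sketch of the slot-search stride computed from the
 * alignment masks, under the assumptions stated above.
 */
#include <stdio.h>

#define IO_TLB_SHIFT	11			/* swiotlb slot size: 2 KiB */
#define IO_TLB_SIZE	(1UL << IO_TLB_SHIFT)
#define PAGE_SHIFT	12			/* assumed 4 KiB pages */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

/* Slots needed to cover 'val' bytes, as nr_slots() does in swiotlb.c. */
static unsigned long nr_slots(unsigned long val)
{
	return (val + IO_TLB_SIZE - 1) >> IO_TLB_SHIFT;
}

/* Search stride implied by an alignment mask, as get_max_slots() does. */
static unsigned long get_max_slots(unsigned long boundary_mask)
{
	return nr_slots(boundary_mask + 1);
}

int main(void)
{
	unsigned long alloc_align_mask = 0;	/* zero for streaming mappings */
	unsigned long iotlb_align_mask = 0;	/* no min_align_mask in effect */
	unsigned long alloc_size = PAGE_SIZE;	/* a mapping >= PAGE_SIZE */
	unsigned long stride;

	/* The check reinstated by this patch. */
	if (!alloc_align_mask && !iotlb_align_mask && alloc_size >= PAGE_SIZE)
		alloc_align_mask = PAGE_SIZE - 1;

	stride = get_max_slots(alloc_align_mask > iotlb_align_mask ?
			       alloc_align_mask : iotlb_align_mask);

	/*
	 * Prints "stride = 2 slots": the search probes only page-aligned
	 * slots, matching the old PAGE_SHIFT - IO_TLB_SHIFT + 1 value that
	 * the removed hunk computed for 4 KiB pages.
	 */
	printf("stride = %lu slots\n", stride);
	return 0;
}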