From 4d0564785bb03841e4b5c5b31aa4ecd1eb0d01bb Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Mon, 18 Oct 2021 13:18:34 +0200
Subject: [PATCH 01/12] dma-direct: factor out dma_set_{de,en}crypted helpers

Factor out helpers that make dealing with memory encryption a little
less cumbersome.

Signed-off-by: Christoph Hellwig
Reviewed-by: Robin Murphy
---
 kernel/dma/direct.c | 56 ++++++++++++++++++++-------------------------
 1 file changed, 25 insertions(+), 31 deletions(-)

diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 4c6c5e0635e3..d4d54af31a34 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -75,6 +75,20 @@ static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
                 min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
 }
 
+static int dma_set_decrypted(struct device *dev, void *vaddr, size_t size)
+{
+        if (!force_dma_unencrypted(dev))
+                return 0;
+        return set_memory_decrypted((unsigned long)vaddr, 1 << get_order(size));
+}
+
+static int dma_set_encrypted(struct device *dev, void *vaddr, size_t size)
+{
+        if (!force_dma_unencrypted(dev))
+                return 0;
+        return set_memory_encrypted((unsigned long)vaddr, 1 << get_order(size));
+}
+
 static void __dma_direct_free_pages(struct device *dev, struct page *page,
                 size_t size)
 {
@@ -154,7 +168,6 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 {
         struct page *page;
         void *ret;
-        int err;
 
         size = PAGE_ALIGN(size);
         if (attrs & DMA_ATTR_NO_WARN)
@@ -216,12 +229,8 @@ void *dma_direct_alloc(struct device *dev, size_t size,
                                 __builtin_return_address(0));
                 if (!ret)
                         goto out_free_pages;
-                if (force_dma_unencrypted(dev)) {
-                        err = set_memory_decrypted((unsigned long)ret,
-                                                   1 << get_order(size));
-                        if (err)
-                                goto out_free_pages;
-                }
+                if (dma_set_decrypted(dev, ret, size))
+                        goto out_free_pages;
                 memset(ret, 0, size);
                 goto done;
         }
@@ -238,13 +247,8 @@ void *dma_direct_alloc(struct device *dev, size_t size,
         }
 
         ret = page_address(page);
-        if (force_dma_unencrypted(dev)) {
-                err = set_memory_decrypted((unsigned long)ret,
-                                           1 << get_order(size));
-                if (err)
-                        goto out_free_pages;
-        }
-
+        if (dma_set_decrypted(dev, ret, size))
+                goto out_free_pages;
         memset(ret, 0, size);
 
         if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
@@ -259,13 +263,9 @@ done:
         return ret;
 
 out_encrypt_pages:
-        if (force_dma_unencrypted(dev)) {
-                err = set_memory_encrypted((unsigned long)page_address(page),
-                                           1 << get_order(size));
-                /* If memory cannot be re-encrypted, it must be leaked */
-                if (err)
-                        return NULL;
-        }
+        /* If memory cannot be re-encrypted, it must be leaked */
+        if (dma_set_encrypted(dev, page_address(page), size))
+                return NULL;
 out_free_pages:
         __dma_direct_free_pages(dev, page, size);
         return NULL;
@@ -304,8 +304,7 @@ void dma_direct_free(struct device *dev, size_t size,
             dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size)))
                 return;
 
-        if (force_dma_unencrypted(dev))
-                set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);
+        dma_set_encrypted(dev, cpu_addr, 1 << page_order);
 
         if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr))
                 vunmap(cpu_addr);
@@ -341,11 +340,8 @@ struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
         }
 
         ret = page_address(page);
-        if (force_dma_unencrypted(dev)) {
-                if (set_memory_decrypted((unsigned long)ret,
-                                         1 << get_order(size)))
-                        goto out_free_pages;
-        }
+        if (dma_set_decrypted(dev, ret, size))
+                goto out_free_pages;
         memset(ret, 0, size);
         *dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
         return page;
@@ -366,9 +362,7 @@ void dma_direct_free_pages(struct device *dev, size_t size,
             dma_free_from_pool(dev, vaddr, size))
                 return;
 
-        if (force_dma_unencrypted(dev))
-                set_memory_encrypted((unsigned long)vaddr, 1 << page_order);
-
+        dma_set_encrypted(dev, vaddr, 1 << page_order);
         __dma_direct_free_pages(dev, page, size);
 }
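
A note on units helps when reading these helpers: both take a size in bytes
and convert it to the page count that set_memory_{de,en}crypted() expects via
1 << get_order(size). A standalone sketch of that conversion, assuming 4 KiB
pages (PAGE_SHIFT = 12 is an assumption of the sketch, and get_order_sketch()
only approximates the kernel's get_order()):

#include <stddef.h>

#define PAGE_SHIFT 12                   /* assumed: 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* smallest order such that (PAGE_SIZE << order) >= size */
static int get_order_sketch(size_t size)
{
        size_t pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        int order = 0;

        while ((1UL << order) < pages)  /* round page count up to a power of two */
                order++;
        return order;
}

/*
 * get_order_sketch(4096) == 0, get_order_sketch(8192) == 1,
 * get_order_sketch(12288) == 2: the page count is rounded up, so
 * 1 << get_order(size) can exceed size / PAGE_SIZE.
 */

Note also that the two free paths above pass 1 << page_order — a page count —
as the helper's byte-size argument, so the helper runs the value through
get_order() a second time. The series keeps those call sites as they were;
the unit mismatch was only straightened out upstream later.
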
From 5570449b6876f215d49ac4db9ccce6ff7aa1e20a Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Thu, 21 Oct 2021 09:20:39 +0200
Subject: [PATCH 02/12] dma-direct: don't call dma_set_decrypted for remapped
 allocations

Remapped allocations handle the encrypted bit through the pgprot passed
to vmap, so there is no need to call dma_set_decrypted.  Note that this
case is currently entirely theoretical, as no valid kernel configuration
supports both remapped allocations and memory encryption.

Signed-off-by: Christoph Hellwig
---
 kernel/dma/direct.c | 13 ++++++-------
 1 file changed, 6 insertions(+), 7 deletions(-)

diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index d4d54af31a34..996ba4edb2fa 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -229,8 +229,6 @@ void *dma_direct_alloc(struct device *dev, size_t size,
                                 __builtin_return_address(0));
                 if (!ret)
                         goto out_free_pages;
-                if (dma_set_decrypted(dev, ret, size))
-                        goto out_free_pages;
                 memset(ret, 0, size);
                 goto done;
         }
@@ -304,12 +302,13 @@ void dma_direct_free(struct device *dev, size_t size,
             dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size)))
                 return;
 
-        dma_set_encrypted(dev, cpu_addr, 1 << page_order);
-
-        if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr))
+        if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
                 vunmap(cpu_addr);
-        else if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_CLEAR_UNCACHED))
-                arch_dma_clear_uncached(cpu_addr, size);
+        } else {
+                if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_CLEAR_UNCACHED))
+                        arch_dma_clear_uncached(cpu_addr, size);
+                dma_set_encrypted(dev, cpu_addr, 1 << page_order);
+        }
 
         __dma_direct_free_pages(dev, dma_direct_to_page(dev, dma_addr), size);
 }
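
The pgprot point deserves a word: for remapped allocations the "decrypted"
attribute travels with the vmap alias itself rather than being applied to the
kernel direct map afterwards. A hedged sketch of the idea, not code from the
patch (in the real path dma_pgprot() picks the protection, and
pgprot_decrypted() degrades to the identity on architectures without memory
encryption, which is why the case is theoretical today):

#include <linux/mm.h>
#include <linux/vmalloc.h>

/*
 * Build a decrypted mapping by construction; the direct-map attributes are
 * left alone, so no set_memory_decrypted()/set_memory_encrypted() pairing
 * is needed on this path.
 */
static void *remap_decrypted(struct page **pages, unsigned int count)
{
        pgprot_t prot = pgprot_decrypted(PAGE_KERNEL);

        return vmap(pages, count, VM_MAP, prot);
}
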
From a90cf30437489343b8386ae87b4827b6d6c3ed50 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Tue, 9 Nov 2021 15:41:01 +0100
Subject: [PATCH 03/12] dma-direct: always leak memory that can't be
 re-encrypted

We must never let unencrypted memory go back into the general page pool.
So if we fail to set it back to encrypted when freeing DMA memory, leak
the memory instead and warn the user.

Signed-off-by: Christoph Hellwig
Reviewed-by: Robin Murphy
---
 kernel/dma/direct.c | 14 ++++++++++----
 1 file changed, 10 insertions(+), 4 deletions(-)

diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 996ba4edb2fa..d7a489be4847 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -84,9 +84,14 @@ static int dma_set_decrypted(struct device *dev, void *vaddr, size_t size)
 
 static int dma_set_encrypted(struct device *dev, void *vaddr, size_t size)
 {
+        int ret;
+
         if (!force_dma_unencrypted(dev))
                 return 0;
-        return set_memory_encrypted((unsigned long)vaddr, 1 << get_order(size));
+        ret = set_memory_encrypted((unsigned long)vaddr, 1 << get_order(size));
+        if (ret)
+                pr_warn_ratelimited("leaking DMA memory that can't be re-encrypted\n");
+        return ret;
 }
 
 static void __dma_direct_free_pages(struct device *dev, struct page *page,
@@ -261,7 +266,6 @@ done:
         return ret;
 
 out_encrypt_pages:
-        /* If memory cannot be re-encrypted, it must be leaked */
         if (dma_set_encrypted(dev, page_address(page), size))
                 return NULL;
 out_free_pages:
@@ -307,7 +311,8 @@ void dma_direct_free(struct device *dev, size_t size,
         } else {
                 if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_CLEAR_UNCACHED))
                         arch_dma_clear_uncached(cpu_addr, size);
-                dma_set_encrypted(dev, cpu_addr, 1 << page_order);
+                if (dma_set_encrypted(dev, cpu_addr, 1 << page_order))
+                        return;
         }
 
         __dma_direct_free_pages(dev, dma_direct_to_page(dev, dma_addr), size);
@@ -361,7 +366,8 @@ void dma_direct_free_pages(struct device *dev, size_t size,
             dma_free_from_pool(dev, vaddr, size))
                 return;
 
-        dma_set_encrypted(dev, vaddr, 1 << page_order);
+        if (dma_set_encrypted(dev, vaddr, 1 << page_order))
+                return;
         __dma_direct_free_pages(dev, page, size);
 }
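
The rule being enforced here generalizes: when the operation that makes
memory safe to reuse fails, the only sound recovery is to drop the memory on
the floor. A user-space sketch of the shape of that rule, with
restore_protection() and release_to_pool() as hypothetical stand-ins for
set_memory_encrypted() and the page allocator:

#include <stdio.h>

/* hypothetical stand-ins; nonzero means failure, like the kernel APIs */
static int restore_protection(void *buf) { (void)buf; return -1; }
static void release_to_pool(void *buf) { (void)buf; }

static void safe_free(void *buf)
{
        if (restore_protection(buf)) {
                /*
                 * Deliberately leak: recycling memory in an unknown
                 * protection state could corrupt an unrelated later user.
                 */
                fprintf(stderr, "leaking buffer that can't be re-protected\n");
                return;
        }
        release_to_pool(buf);
}

int main(void)
{
        safe_free((void *)0x1000);      /* never dereferenced in the sketch */
        return 0;
}
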
From f3c962226dbec7a611ddd4eb7af7f4e19f4790ea Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Tue, 9 Nov 2021 15:20:40 +0100
Subject: [PATCH 04/12] dma-direct: clean up the remapping checks in
 dma_direct_alloc

Add two local variables to track if we want to remap the returned
address using vmap or call dma_set_uncached and use that to simplify
the code flow.

Signed-off-by: Christoph Hellwig
Reviewed-by: Robin Murphy
---
 kernel/dma/direct.c | 48 ++++++++++++++++++++++++---------------------
 1 file changed, 26 insertions(+), 22 deletions(-)

diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index d7a489be4847..3d1718dc077e 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -171,6 +171,7 @@ static void *dma_direct_alloc_from_pool(struct device *dev, size_t size,
 void *dma_direct_alloc(struct device *dev, size_t size,
                 dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
 {
+        bool remap = false, set_uncached = false;
         struct page *page;
         void *ret;
 
@@ -222,9 +223,25 @@ void *dma_direct_alloc(struct device *dev, size_t size,
         if (!page)
                 return NULL;
 
-        if ((IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
-             !dev_is_dma_coherent(dev)) ||
-            (IS_ENABLED(CONFIG_DMA_REMAP) && PageHighMem(page))) {
+        if (!dev_is_dma_coherent(dev) && IS_ENABLED(CONFIG_DMA_DIRECT_REMAP)) {
+                remap = true;
+        } else if (PageHighMem(page)) {
+                /*
+                 * Depending on the cma= arguments and per-arch setup,
+                 * dma_alloc_contiguous could return highmem pages.
+                 * Without remapping there is no way to return them here, so
+                 * log an error and fail.
+                 */
+                if (!IS_ENABLED(CONFIG_DMA_REMAP)) {
+                        dev_info(dev, "Rejecting highmem page from CMA.\n");
+                        goto out_free_pages;
+                }
+                remap = true;
+        } else if (!dev_is_dma_coherent(dev) &&
+                   IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED))
+                set_uncached = true;
+
+        if (remap) {
                 /* remove any dirty cache lines on the kernel alias */
                 arch_dma_prep_coherent(page, size);
 
@@ -234,34 +251,21 @@ void *dma_direct_alloc(struct device *dev, size_t size,
                                 __builtin_return_address(0));
                 if (!ret)
                         goto out_free_pages;
-                memset(ret, 0, size);
-                goto done;
+        } else {
+                ret = page_address(page);
+                if (dma_set_decrypted(dev, ret, size))
+                        goto out_free_pages;
         }
 
-        if (PageHighMem(page)) {
-                /*
-                 * Depending on the cma= arguments and per-arch setup
-                 * dma_alloc_contiguous could return highmem pages.
-                 * Without remapping there is no way to return them here,
-                 * so log an error and fail.
-                 */
-                dev_info(dev, "Rejecting highmem page from CMA.\n");
-                goto out_free_pages;
-        }
-
-        ret = page_address(page);
-        if (dma_set_decrypted(dev, ret, size))
-                goto out_free_pages;
-
         memset(ret, 0, size);
 
-        if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
-            !dev_is_dma_coherent(dev)) {
+        if (set_uncached) {
                 arch_dma_prep_coherent(page, size);
                 ret = arch_dma_set_uncached(ret, size);
                 if (IS_ERR(ret))
                         goto out_encrypt_pages;
         }
-done:
+
         *dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
         return ret;
 

From d541ae55d538265861ef729a64d2d816d34ef1e2 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Mon, 18 Oct 2021 13:08:07 +0200
Subject: [PATCH 05/12] dma-direct: factor out a helper for
 DMA_ATTR_NO_KERNEL_MAPPING allocations

Split the code for DMA_ATTR_NO_KERNEL_MAPPING allocations into a
separate helper to make dma_direct_alloc a little more readable.

Signed-off-by: Christoph Hellwig
Reviewed-by: Robin Murphy
Acked-by: David Rientjes
---
 kernel/dma/direct.c | 31 ++++++++++++++++++++-----------
 1 file changed, 20 insertions(+), 11 deletions(-)

diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 3d1718dc077e..01104660ec43 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -168,6 +168,24 @@ static void *dma_direct_alloc_from_pool(struct device *dev, size_t size,
         return ret;
 }
 
+static void *dma_direct_alloc_no_mapping(struct device *dev, size_t size,
+                dma_addr_t *dma_handle, gfp_t gfp)
+{
+        struct page *page;
+
+        page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO);
+        if (!page)
+                return NULL;
+
+        /* remove any dirty cache lines on the kernel alias */
+        if (!PageHighMem(page))
+                arch_dma_prep_coherent(page, size);
+
+        /* return the page pointer as the opaque cookie */
+        *dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
+        return page;
+}
+
 void *dma_direct_alloc(struct device *dev, size_t size,
                 dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
 {
@@ -180,17 +198,8 @@ void *dma_direct_alloc(struct device *dev, size_t size,
                 gfp |= __GFP_NOWARN;
 
         if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
-            !force_dma_unencrypted(dev) && !is_swiotlb_for_alloc(dev)) {
-                page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO);
-                if (!page)
-                        return NULL;
-                /* remove any dirty cache lines on the kernel alias */
-                if (!PageHighMem(page))
-                        arch_dma_prep_coherent(page, size);
-                *dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
-                /* return the page pointer as the opaque cookie */
-                return page;
-        }
+            !force_dma_unencrypted(dev) && !is_swiotlb_for_alloc(dev))
+                return dma_direct_alloc_no_mapping(dev, size, dma_handle, gfp);
 
         if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
             !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
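
The opaque-cookie contract that dma_direct_alloc_no_mapping() implements is
easy to misuse from the driver side, so a usage sketch may help. The wrapper
functions below are hypothetical; dma_alloc_attrs()/dma_free_attrs() are the
real DMA API entry points:

#include <linux/dma-mapping.h>

/*
 * With DMA_ATTR_NO_KERNEL_MAPPING the returned value is an opaque cookie
 * (internally the struct page pointer), valid only for passing back to
 * dma_free_attrs().  The CPU must never dereference it; the device is the
 * only agent that touches the buffer, through *dma_handle.
 */
static int alloc_device_only_buffer(struct device *dev, size_t size,
                                    void **cookie, dma_addr_t *dma_handle)
{
        *cookie = dma_alloc_attrs(dev, size, dma_handle, GFP_KERNEL,
                                  DMA_ATTR_NO_KERNEL_MAPPING);
        return *cookie ? 0 : -ENOMEM;
}

static void free_device_only_buffer(struct device *dev, size_t size,
                                    void *cookie, dma_addr_t dma_handle)
{
        dma_free_attrs(dev, size, cookie, dma_handle,
                       DMA_ATTR_NO_KERNEL_MAPPING);
}

This also explains the !force_dma_unencrypted(dev) condition at the call
site: the shortcut skips the dma_set_decrypted() step entirely, so devices
that require unencrypted memory cannot take it.
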
From a86d10942db2e0099a369b367fe62898f95987a8 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Thu, 21 Oct 2021 09:47:31 +0200
Subject: [PATCH 06/12] dma-direct: refactor the !coherent checks in
 dma_direct_alloc

Add a big central !dev_is_dma_coherent(dev) block to deal with as much
as possible of the uncached allocation schemes and document the schemes
a bit better.

Signed-off-by: Christoph Hellwig
Reviewed-by: Robin Murphy
---
 kernel/dma/direct.c | 66 ++++++++++++++++++++++++++++-----------------
 1 file changed, 41 insertions(+), 25 deletions(-)

diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 01104660ec43..f9658fe18498 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -201,29 +201,49 @@ void *dma_direct_alloc(struct device *dev, size_t size,
             !force_dma_unencrypted(dev) && !is_swiotlb_for_alloc(dev))
                 return dma_direct_alloc_no_mapping(dev, size, dma_handle, gfp);
 
-        if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
-            !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
-            !IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
-            !dev_is_dma_coherent(dev) &&
-            !is_swiotlb_for_alloc(dev))
-                return arch_dma_alloc(dev, size, dma_handle, gfp, attrs);
+        if (!dev_is_dma_coherent(dev)) {
+                /*
+                 * Fallback to the arch handler if it exists.  This should
+                 * eventually go away.
+                 */
+                if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
+                    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
+                    !IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
+                    !is_swiotlb_for_alloc(dev))
+                        return arch_dma_alloc(dev, size, dma_handle, gfp,
+                                              attrs);
 
-        if (IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
-            !dev_is_dma_coherent(dev))
-                return dma_alloc_from_global_coherent(dev, size, dma_handle);
+                /*
+                 * If there is a global pool, always allocate from it for
+                 * non-coherent devices.
+                 */
+                if (IS_ENABLED(CONFIG_DMA_GLOBAL_POOL))
+                        return dma_alloc_from_global_coherent(dev, size,
+                                        dma_handle);
+
+                /*
+                 * Otherwise remap if the architecture is asking for it.  But
+                 * given that remapping memory is a blocking operation we'll
+                 * instead have to dip into the atomic pools.
+                 */
+                remap = IS_ENABLED(CONFIG_DMA_DIRECT_REMAP);
+                if (remap) {
+                        if (!gfpflags_allow_blocking(gfp) &&
+                            !is_swiotlb_for_alloc(dev))
+                                return dma_direct_alloc_from_pool(dev, size,
+                                                dma_handle, gfp);
+                } else {
+                        if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED))
+                                set_uncached = true;
+                }
+        }
 
         /*
-         * Remapping or decrypting memory may block. If either is required and
-         * we can't block, allocate the memory from the atomic pools.
-         * If restricted DMA (i.e., is_swiotlb_for_alloc) is required, one must
-         * set up another device coherent pool by shared-dma-pool and use
-         * dma_alloc_from_dev_coherent instead.
+         * Decrypting memory may block, so allocate the memory from the atomic
+         * pools if we can't block.
          */
         if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
-            !gfpflags_allow_blocking(gfp) &&
-            (force_dma_unencrypted(dev) ||
-             (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
-              !dev_is_dma_coherent(dev))) &&
+            force_dma_unencrypted(dev) && !gfpflags_allow_blocking(gfp) &&
             !is_swiotlb_for_alloc(dev))
                 return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);
 
@@ -231,10 +251,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
         page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO);
         if (!page)
                 return NULL;
-
-        if (!dev_is_dma_coherent(dev) && IS_ENABLED(CONFIG_DMA_DIRECT_REMAP)) {
-                remap = true;
-        } else if (PageHighMem(page)) {
+        if (PageHighMem(page)) {
                 /*
                  * Depending on the cma= arguments and per-arch setup,
                  * dma_alloc_contiguous could return highmem pages.
@@ -246,9 +263,8 @@ void *dma_direct_alloc(struct device *dev, size_t size,
                         goto out_free_pages;
                 }
                 remap = true;
-        } else if (!dev_is_dma_coherent(dev) &&
-                   IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED))
-                set_uncached = true;
+                set_uncached = false;
+        }
 
         if (remap) {
                 /* remove any dirty cache lines on the kernel alias */
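
Collapsed into one place, the policy this block implements for a
non-coherent device reads as below. This is a pseudo-C summary for
orientation only, with the IS_ENABLED() tests turned into booleans; the
final FALL_THROUGH arm only becomes an explicit failure in the next patch:

#include <stdbool.h>

enum path { ARCH_ALLOC, GLOBAL_POOL, ATOMIC_POOL, REMAP, SET_UNCACHED,
            FALL_THROUGH };

static enum path noncoherent_path(bool can_remap, bool has_global_pool,
                                  bool has_set_uncached, bool restricted_pool,
                                  bool can_block)
{
        if (!can_remap && !has_global_pool && !has_set_uncached &&
            !restricted_pool)
                return ARCH_ALLOC;      /* legacy arch_dma_alloc() hook */
        if (has_global_pool)
                return GLOBAL_POOL;     /* CONFIG_DMA_GLOBAL_POOL */
        if (can_remap)                  /* CONFIG_DMA_DIRECT_REMAP */
                return (can_block || restricted_pool) ? REMAP : ATOMIC_POOL;
        if (has_set_uncached)           /* CONFIG_ARCH_HAS_DMA_SET_UNCACHED */
                return SET_UNCACHED;
        return FALL_THROUGH;            /* made an explicit NULL in patch 07 */
}
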
From 955f58f7406ad912825fc344c7825fd904b124a0 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Tue, 9 Nov 2021 15:47:56 +0100
Subject: [PATCH 07/12] dma-direct: fail allocations that can't be made
 coherent

If the architecture can't remap or set an address uncached there is no
way to fulfill a request for a coherent allocation.  Return NULL in that
case.  Note that this case currently does not happen, so this is a
theoretical fixup and/or a preparation for eventually supporting
platforms that can't support coherent allocations with the generic code.

Signed-off-by: Christoph Hellwig
Reviewed-by: Robin Murphy
---
 kernel/dma/direct.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index f9658fe18498..a13017656eca 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -233,8 +233,9 @@ void *dma_direct_alloc(struct device *dev, size_t size,
                                 return dma_direct_alloc_from_pool(dev, size,
                                                 dma_handle, gfp);
                 } else {
-                        if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED))
-                                set_uncached = true;
+                        if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED))
+                                return NULL;
+                        set_uncached = true;
                 }
         }
 

From 78bc72787ab9e638173aeb1f589578105d3a43c9 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Thu, 21 Oct 2021 10:00:55 +0200
Subject: [PATCH 08/12] dma-direct: warn if there is no pool for force
 unencrypted allocations

Instead of blindly running into a blocking operation for a non-blocking
gfp, return NULL and spew an error.  Note that Kconfig prevents this for
all currently relevant platforms, and this is just a debug check.

Signed-off-by: Christoph Hellwig
Reviewed-by: Robin Murphy
---
 kernel/dma/direct.c | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index a13017656eca..84226a764471 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -159,6 +159,9 @@ static void *dma_direct_alloc_from_pool(struct device *dev, size_t size,
         u64 phys_mask;
         void *ret;
 
+        if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_DMA_COHERENT_POOL)))
+                return NULL;
+
         gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
                                            &phys_mask);
         page = dma_alloc_from_pool(dev, size, &ret, gfp, dma_coherent_ok);
@@ -243,8 +246,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
          * Decrypting memory may block, so allocate the memory from the atomic
          * pools if we can't block.
          */
-        if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
-            force_dma_unencrypted(dev) && !gfpflags_allow_blocking(gfp) &&
+        if (force_dma_unencrypted(dev) && !gfpflags_allow_blocking(gfp) &&
             !is_swiotlb_for_alloc(dev))
                 return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);
 
@@ -354,8 +356,7 @@ struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
         struct page *page;
         void *ret;
 
-        if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
-            force_dma_unencrypted(dev) && !gfpflags_allow_blocking(gfp) &&
+        if (force_dma_unencrypted(dev) && !gfpflags_allow_blocking(gfp) &&
             !is_swiotlb_for_alloc(dev))
                 return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);
 
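
Two small idioms carry the last patch. First, WARN_ON_ONCE(!IS_ENABLED(...))
is free on well-configured kernels: IS_ENABLED() folds to a compile-time
constant, so the branch is deleted entirely when the pool is built in.
Second, the non-blocking test is gfpflags_allow_blocking(), which simply
checks for __GFP_DIRECT_RECLAIM; a one-line illustration (the helper itself
comes from <linux/gfp.h>, only the wrapper name is invented here):

#include <linux/gfp.h>

/* true for GFP_KERNEL (may sleep to reclaim), false for GFP_ATOMIC */
static bool may_block(gfp_t gfp)
{
        return gfpflags_allow_blocking(gfp);
}
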
From f5d3939a5916c0a8a0b47dcbc33963dbffe74f90 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Thu, 21 Oct 2021 09:34:59 +0200
Subject: [PATCH 09/12] dma-direct: drop two CONFIG_DMA_RESTRICTED_POOL
 conditionals

swiotlb_alloc and swiotlb_free are properly stubbed out if
CONFIG_DMA_RESTRICTED_POOL is not set, so skip the extra checks.

Signed-off-by: Christoph Hellwig
Reviewed-by: Robin Murphy
---
 kernel/dma/direct.c | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 84226a764471..cf75bfb2f499 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -97,8 +97,7 @@ static int dma_set_encrypted(struct device *dev, void *vaddr, size_t size)
 static void __dma_direct_free_pages(struct device *dev, struct page *page,
                 size_t size)
 {
-        if (IS_ENABLED(CONFIG_DMA_RESTRICTED_POOL) &&
-            swiotlb_free(dev, page, size))
+        if (swiotlb_free(dev, page, size))
                 return;
         dma_free_contiguous(dev, page, size);
 }
@@ -114,8 +113,7 @@ static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
 
         gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
                                            &phys_limit);
-        if (IS_ENABLED(CONFIG_DMA_RESTRICTED_POOL) &&
-            is_swiotlb_for_alloc(dev)) {
+        if (is_swiotlb_for_alloc(dev)) {
                 page = swiotlb_alloc(dev, size);
                 if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
                         __dma_direct_free_pages(dev, page, size);
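
The conditionals are droppable because of the header-stub idiom: with
CONFIG_DMA_RESTRICTED_POOL disabled, <linux/swiotlb.h> supplies static
inline versions that the optimizer folds away, so the call sites need no
guard of their own. A paraphrase of the shape of those stubs (reconstructed
for illustration, not copied from the header):

#ifdef CONFIG_DMA_RESTRICTED_POOL
struct page *swiotlb_alloc(struct device *dev, size_t size);
bool swiotlb_free(struct device *dev, struct page *page, size_t size);
#else
static inline struct page *swiotlb_alloc(struct device *dev, size_t size)
{
        return NULL;
}
static inline bool swiotlb_free(struct device *dev, struct page *page,
                                size_t size)
{
        return false;   /* never claims the page; caller falls through */
}
#endif
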
From aea7e2a86a94b2583e1e812c596140034398a169 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Thu, 21 Oct 2021 09:39:12 +0200
Subject: [PATCH 10/12] dma-direct: factor the swiotlb code out of
 __dma_direct_alloc_pages

Add a new helper to deal with the swiotlb case.  This keeps the code
nicely bundled and removes the unnecessary call to
dma_direct_optimal_gfp_mask for the swiotlb case.

Signed-off-by: Christoph Hellwig
Reviewed-by: Robin Murphy
---
 kernel/dma/direct.c | 24 +++++++++++++++---------
 1 file changed, 15 insertions(+), 9 deletions(-)

diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index cf75bfb2f499..924937c54e8a 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -102,6 +102,18 @@ static void __dma_direct_free_pages(struct device *dev, struct page *page,
         dma_free_contiguous(dev, page, size);
 }
 
+static struct page *dma_direct_alloc_swiotlb(struct device *dev, size_t size)
+{
+        struct page *page = swiotlb_alloc(dev, size);
+
+        if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
+                swiotlb_free(dev, page, size);
+                return NULL;
+        }
+
+        return page;
+}
+
 static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
                 gfp_t gfp)
 {
@@ -111,17 +123,11 @@ static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
 
         WARN_ON_ONCE(!PAGE_ALIGNED(size));
 
+        if (is_swiotlb_for_alloc(dev))
+                return dma_direct_alloc_swiotlb(dev, size);
+
         gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
                                            &phys_limit);
-        if (is_swiotlb_for_alloc(dev)) {
-                page = swiotlb_alloc(dev, size);
-                if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
-                        __dma_direct_free_pages(dev, page, size);
-                        return NULL;
-                }
-                return page;
-        }
-
         page = dma_alloc_contiguous(dev, size, gfp);
         if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
                 dma_free_contiguous(dev, page, size);

From 28e4576d556bca543b0996e9edd4b767397e24c6 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Tue, 9 Nov 2021 15:50:28 +0100
Subject: [PATCH 11/12] dma-direct: add a dma_direct_use_pool helper

Add a helper to check if a potentially blocking operation should dip
into the atomic pools.

Signed-off-by: Christoph Hellwig
Reviewed-by: Robin Murphy
---
 kernel/dma/direct.c | 18 ++++++++++++------
 1 file changed, 12 insertions(+), 6 deletions(-)

diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 924937c54e8a..50f48e9e4598 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -156,6 +156,15 @@ again:
         return page;
 }
 
+/*
+ * Check if a potentially blocking operation needs to dip into the atomic
+ * pools for the given device/gfp.
+ */
+static bool dma_direct_use_pool(struct device *dev, gfp_t gfp)
+{
+        return !gfpflags_allow_blocking(gfp) && !is_swiotlb_for_alloc(dev);
+}
+
 static void *dma_direct_alloc_from_pool(struct device *dev, size_t size,
                 dma_addr_t *dma_handle, gfp_t gfp)
 {
@@ -235,8 +244,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
                  */
                 remap = IS_ENABLED(CONFIG_DMA_DIRECT_REMAP);
                 if (remap) {
-                        if (!gfpflags_allow_blocking(gfp) &&
-                            !is_swiotlb_for_alloc(dev))
+                        if (dma_direct_use_pool(dev, gfp))
                                 return dma_direct_alloc_from_pool(dev, size,
                                                 dma_handle, gfp);
                 } else {
@@ -250,8 +258,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
          * Decrypting memory may block, so allocate the memory from the atomic
          * pools if we can't block.
          */
-        if (force_dma_unencrypted(dev) && !gfpflags_allow_blocking(gfp) &&
-            !is_swiotlb_for_alloc(dev))
+        if (force_dma_unencrypted(dev) && dma_direct_use_pool(dev, gfp))
                 return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);
 
         /* we always manually zero the memory once we are done */
@@ -360,8 +367,7 @@ struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
         struct page *page;
         void *ret;
 
-        if (force_dma_unencrypted(dev) && !gfpflags_allow_blocking(gfp) &&
-            !is_swiotlb_for_alloc(dev))
+        if (force_dma_unencrypted(dev) && dma_direct_use_pool(dev, gfp))
                 return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);
 
         page = __dma_direct_alloc_pages(dev, size, gfp);
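
Because dma_direct_use_pool() is a pure predicate, its contract fits in a
small table. The is_swiotlb_for_alloc() term is the subtle half: a device
confined to a restricted pool must never fall back to the global atomic
pools, so that case stays on the (possibly blocking) swiotlb path:

        gfp allows blocking | restricted pool | dma_direct_use_pool()
        --------------------+-----------------+-------------------------
        yes                 | any             | false (just block)
        no                  | yes             | false (stay on swiotlb)
        no                  | no              | true  (atomic pools)
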
From f857acfc457ea63fa5b862d77f055665d863acfe Mon Sep 17 00:00:00 2001
From: Logan Gunthorpe
Date: Wed, 17 Nov 2021 14:53:48 -0700
Subject: [PATCH 12/12] lib/scatterlist: cleanup macros into static inline
 functions

Convert the sg_is_chain(), sg_is_last() and sg_chain_ptr() macros into
static inline functions.  There's no reason for these to be macros, and
static inline functions are generally preferred these days.

Also introduce the SG_PAGE_LINK_MASK define so the P2PDMA work, which
is adding another bit to this mask, can do so more easily.

Suggested-by: Jason Gunthorpe
Signed-off-by: Logan Gunthorpe
Reviewed-by: Chaitanya Kulkarni
Signed-off-by: Christoph Hellwig
---
 include/linux/scatterlist.h | 29 +++++++++++++++++++++++------
 1 file changed, 23 insertions(+), 6 deletions(-)

diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index 266754a55327..7ff9d6386c12 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -69,10 +69,27 @@ struct sg_append_table {
  * a valid sg entry, or whether it points to the start of a new scatterlist.
  * Those low bits are there for everyone! (thanks mason :-)
  */
-#define sg_is_chain(sg)         ((sg)->page_link & SG_CHAIN)
-#define sg_is_last(sg)          ((sg)->page_link & SG_END)
-#define sg_chain_ptr(sg)        \
-        ((struct scatterlist *) ((sg)->page_link & ~(SG_CHAIN | SG_END)))
+#define SG_PAGE_LINK_MASK (SG_CHAIN | SG_END)
+
+static inline unsigned int __sg_flags(struct scatterlist *sg)
+{
+        return sg->page_link & SG_PAGE_LINK_MASK;
+}
+
+static inline struct scatterlist *sg_chain_ptr(struct scatterlist *sg)
+{
+        return (struct scatterlist *)(sg->page_link & ~SG_PAGE_LINK_MASK);
+}
+
+static inline bool sg_is_chain(struct scatterlist *sg)
+{
+        return __sg_flags(sg) & SG_CHAIN;
+}
+
+static inline bool sg_is_last(struct scatterlist *sg)
+{
+        return __sg_flags(sg) & SG_END;
+}
 
 /**
  * sg_assign_page - Assign a given page to an SG entry
@@ -92,7 +109,7 @@ static inline void sg_assign_page(struct scatterlist *sg, struct page *page)
          * In order for the low bit stealing approach to work, pages
          * must be aligned at a 32-bit boundary as a minimum.
          */
-        BUG_ON((unsigned long) page & (SG_CHAIN | SG_END));
+        BUG_ON((unsigned long)page & SG_PAGE_LINK_MASK);
 #ifdef CONFIG_DEBUG_SG
         BUG_ON(sg_is_chain(sg));
 #endif
@@ -126,7 +143,7 @@ static inline struct page *sg_page(struct scatterlist *sg)
 #ifdef CONFIG_DEBUG_SG
         BUG_ON(sg_is_chain(sg));
 #endif
-        return (struct page *)((sg)->page_link & ~(SG_CHAIN | SG_END));
+        return (struct page *)((sg)->page_link & ~SG_PAGE_LINK_MASK);
 }
 
 /**
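
For readers new to page_link: scatterlist steals the two low bits of a
pointer that alignment guarantees are zero. A self-contained user-space
demonstration of the tagging pattern (the names are illustrative, not the
kernel's):

#include <assert.h>
#include <stdint.h>

#define TAG_CHAIN 0x1UL
#define TAG_END   0x2UL
#define TAG_MASK  (TAG_CHAIN | TAG_END)

int main(void)
{
        static _Alignas(4) int object = 42;
        uintptr_t link = (uintptr_t)&object;

        assert((link & TAG_MASK) == 0);         /* alignment leaves the bits free */
        link |= TAG_END;                        /* stash a flag in the pointer */
        assert(link & TAG_END);                 /* ...and query it later */
        assert(*(int *)(link & ~TAG_MASK) == 42);  /* mask before dereference */
        return 0;
}

Compared with the old macros, the static inline versions type-check the sg
argument and evaluate it exactly once, at no runtime cost.
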