dmapool: remove checks for dev == NULL

dmapool originally tried to support pools without a device because
dma_alloc_coherent() supports allocations without a device.  But nobody
ended up using dma pools without a device, and trying to do so will result
in an oops.  So remove the checks for pool->dev == NULL since they are
unneeded bloat.

[kbusch@kernel.org: add check for null dev on create]
Link: https://lkml.kernel.org/r/20230126215125.4069751-3-kbusch@meta.com
Fixes: 2d55c16c0c ("dmapool: create/destroy cleanup")
Signed-off-by: Tony Battersby <tonyb@cybernetics.com>
Signed-off-by: Keith Busch <kbusch@kernel.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Cc: Matthew Wilcox <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
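
For context, a minimal caller-side sketch of the contract after this change (illustrative only, not part of the patch: the probe function, pool name, and size/align values are made-up assumptions). dma_pool_create() must be handed a real struct device; a NULL dev now fails fast by returning NULL instead of creating a pool that oopses later:

#include <linux/device.h>
#include <linux/dmapool.h>

/* Hypothetical driver fragment; names and sizes are illustrative. */
static struct dma_pool *example_pool;

static int example_probe(struct device *dev)
{
	/*
	 * dev must be non-NULL: dma_pool_create() now returns NULL
	 * for a NULL device rather than setting up a pool that
	 * oopses later in alloc/free/destroy.
	 */
	example_pool = dma_pool_create("example_pool", dev, 64, 8, 0);
	if (!example_pool)
		return -ENOMEM;

	return 0;
}

With the device guaranteed non-NULL at create time, every pr_err() fallback in the diff below collapses into the corresponding dev_err() call.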

--- a/mm/dmapool.c
+++ b/mm/dmapool.c
@@ -134,6 +134,9 @@ struct dma_pool *dma_pool_create(const char *name, struct device *dev,
 	size_t allocation;
 	bool empty = false;
 
+	if (!dev)
+		return NULL;
+
 	if (align == 0)
 		align = 1;
 	else if (align & (align - 1))
@@ -275,7 +278,7 @@ void dma_pool_destroy(struct dma_pool *pool)
 	mutex_lock(&pools_reg_lock);
 	mutex_lock(&pools_lock);
 	list_del(&pool->pools);
-	if (pool->dev && list_empty(&pool->dev->dma_pools))
+	if (list_empty(&pool->dev->dma_pools))
 		empty = true;
 	mutex_unlock(&pools_lock);
 	if (empty)
@@ -284,12 +287,8 @@ void dma_pool_destroy(struct dma_pool *pool)
 
 	list_for_each_entry_safe(page, tmp, &pool->page_list, page_list) {
 		if (is_page_busy(page)) {
-			if (pool->dev)
-				dev_err(pool->dev, "%s %s, %p busy\n", __func__,
-					pool->name, page->vaddr);
-			else
-				pr_err("%s %s, %p busy\n", __func__,
-				       pool->name, page->vaddr);
+			dev_err(pool->dev, "%s %s, %p busy\n", __func__,
+				pool->name, page->vaddr);
 			/* leak the still-in-use consistent memory */
 			list_del(&page->page_list);
 			kfree(page);
@@ -351,12 +350,8 @@ void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
 		for (i = sizeof(page->offset); i < pool->size; i++) {
 			if (data[i] == POOL_POISON_FREED)
 				continue;
-			if (pool->dev)
-				dev_err(pool->dev, "%s %s, %p (corrupted)\n",
-					__func__, pool->name, retval);
-			else
-				pr_err("%s %s, %p (corrupted)\n",
-				       __func__, pool->name, retval);
+			dev_err(pool->dev, "%s %s, %p (corrupted)\n",
+				__func__, pool->name, retval);
 
 			/*
 			 * Dump the first 4 bytes even if they are not
@@ -411,12 +406,8 @@ void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
 	page = pool_find_page(pool, dma);
 	if (!page) {
 		spin_unlock_irqrestore(&pool->lock, flags);
-		if (pool->dev)
-			dev_err(pool->dev, "%s %s, %p/%pad (bad dma)\n",
-				__func__, pool->name, vaddr, &dma);
-		else
-			pr_err("%s %s, %p/%pad (bad dma)\n",
-			       __func__, pool->name, vaddr, &dma);
+		dev_err(pool->dev, "%s %s, %p/%pad (bad dma)\n",
+			__func__, pool->name, vaddr, &dma);
 		return;
 	}
 
@@ -426,12 +417,8 @@ void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
 #ifdef DMAPOOL_DEBUG
 	if ((dma - page->dma) != offset) {
 		spin_unlock_irqrestore(&pool->lock, flags);
-		if (pool->dev)
-			dev_err(pool->dev, "%s %s, %p (bad vaddr)/%pad\n",
-				__func__, pool->name, vaddr, &dma);
-		else
-			pr_err("%s %s, %p (bad vaddr)/%pad\n",
-			       __func__, pool->name, vaddr, &dma);
+		dev_err(pool->dev, "%s %s, %p (bad vaddr)/%pad\n",
+			__func__, pool->name, vaddr, &dma);
 		return;
 	}
 	{
@@ -442,12 +429,8 @@ void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
 				continue;
 			}
 			spin_unlock_irqrestore(&pool->lock, flags);
-			if (pool->dev)
-				dev_err(pool->dev, "%s %s, dma %pad already free\n",
-					__func__, pool->name, &dma);
-			else
-				pr_err("%s %s, dma %pad already free\n",
-				       __func__, pool->name, &dma);
+			dev_err(pool->dev, "%s %s, dma %pad already free\n",
+				__func__, pool->name, &dma);
 			return;
 		}
 	}