dmapool: remove checks for dev == NULL
dmapool originally tried to support pools without a device because
dma_alloc_coherent() supports allocations without a device. But nobody
ended up using dma pools without a device, and trying to do so will result
in an oops. So remove the checks for pool->dev == NULL since they are
unneeded bloat.
[kbusch@kernel.org: add check for null dev on create]
Link: https://lkml.kernel.org/r/20230126215125.4069751-3-kbusch@meta.com
Fixes: 2d55c16c0c ("dmapool: create/destroy cleanup")
Signed-off-by: Tony Battersby <tonyb@cybernetics.com>
Signed-off-by: Keith Busch <kbusch@kernel.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Cc: Matthew Wilcox <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 418d5c9831
commit 67a540c60c

mm/dmapool.c | 25
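For context, a minimal sketch of how a driver would use dmapool after this change, with a non-NULL struct device. The probe function, pool name, and sizes below are hypothetical and not part of the patch:

#include <linux/dmapool.h>
#include <linux/pci.h>

/* Hypothetical caller: dma_pool_create() now requires a real device. */
static int example_probe(struct pci_dev *pdev)
{
	struct dma_pool *pool;
	dma_addr_t dma_handle;
	void *cpu_addr;

	/*
	 * Passing a NULL device here now makes dma_pool_create() return
	 * NULL instead of oopsing later in the other dmapool calls.
	 */
	pool = dma_pool_create("example-pool", &pdev->dev, 64, 8, 0);
	if (!pool)
		return -ENOMEM;

	cpu_addr = dma_pool_alloc(pool, GFP_KERNEL, &dma_handle);
	if (!cpu_addr) {
		dma_pool_destroy(pool);
		return -ENOMEM;
	}

	/* ... program the device with dma_handle, use cpu_addr ... */

	dma_pool_free(pool, cpu_addr, dma_handle);
	dma_pool_destroy(pool);
	return 0;
}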
@@ -134,6 +134,9 @@ struct dma_pool *dma_pool_create(const char *name, struct device *dev,
 	size_t allocation;
 	bool empty = false;
 
+	if (!dev)
+		return NULL;
+
 	if (align == 0)
 		align = 1;
 	else if (align & (align - 1))
@@ -275,7 +278,7 @@ void dma_pool_destroy(struct dma_pool *pool)
 	mutex_lock(&pools_reg_lock);
 	mutex_lock(&pools_lock);
 	list_del(&pool->pools);
-	if (pool->dev && list_empty(&pool->dev->dma_pools))
+	if (list_empty(&pool->dev->dma_pools))
 		empty = true;
 	mutex_unlock(&pools_lock);
 	if (empty)
@@ -284,12 +287,8 @@ void dma_pool_destroy(struct dma_pool *pool)
 
 	list_for_each_entry_safe(page, tmp, &pool->page_list, page_list) {
 		if (is_page_busy(page)) {
-			if (pool->dev)
-				dev_err(pool->dev, "%s %s, %p busy\n", __func__,
-					pool->name, page->vaddr);
-			else
-				pr_err("%s %s, %p busy\n", __func__,
-				       pool->name, page->vaddr);
+			dev_err(pool->dev, "%s %s, %p busy\n", __func__,
+				pool->name, page->vaddr);
 			/* leak the still-in-use consistent memory */
 			list_del(&page->page_list);
 			kfree(page);
@@ -351,12 +350,8 @@ void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
 		for (i = sizeof(page->offset); i < pool->size; i++) {
 			if (data[i] == POOL_POISON_FREED)
 				continue;
-			if (pool->dev)
-				dev_err(pool->dev, "%s %s, %p (corrupted)\n",
-					__func__, pool->name, retval);
-			else
-				pr_err("%s %s, %p (corrupted)\n",
-					__func__, pool->name, retval);
+			dev_err(pool->dev, "%s %s, %p (corrupted)\n",
+				__func__, pool->name, retval);
 
 			/*
 			 * Dump the first 4 bytes even if they are not
@@ -411,12 +406,8 @@ void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
 	page = pool_find_page(pool, dma);
 	if (!page) {
 		spin_unlock_irqrestore(&pool->lock, flags);
-		if (pool->dev)
-			dev_err(pool->dev, "%s %s, %p/%pad (bad dma)\n",
-				__func__, pool->name, vaddr, &dma);
-		else
-			pr_err("%s %s, %p/%pad (bad dma)\n",
-			       __func__, pool->name, vaddr, &dma);
+		dev_err(pool->dev, "%s %s, %p/%pad (bad dma)\n",
+			__func__, pool->name, vaddr, &dma);
 		return;
 	}
 
@@ -426,12 +417,8 @@ void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
 #ifdef	DMAPOOL_DEBUG
 	if ((dma - page->dma) != offset) {
 		spin_unlock_irqrestore(&pool->lock, flags);
-		if (pool->dev)
-			dev_err(pool->dev, "%s %s, %p (bad vaddr)/%pad\n",
-				__func__, pool->name, vaddr, &dma);
-		else
-			pr_err("%s %s, %p (bad vaddr)/%pad\n",
-			       __func__, pool->name, vaddr, &dma);
+		dev_err(pool->dev, "%s %s, %p (bad vaddr)/%pad\n",
+			__func__, pool->name, vaddr, &dma);
 		return;
 	}
 	{
@@ -442,12 +429,8 @@ void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
 				continue;
 			}
 			spin_unlock_irqrestore(&pool->lock, flags);
-			if (pool->dev)
-				dev_err(pool->dev, "%s %s, dma %pad already free\n",
-					__func__, pool->name, &dma);
-			else
-				pr_err("%s %s, dma %pad already free\n",
-				       __func__, pool->name, &dma);
+			dev_err(pool->dev, "%s %s, dma %pad already free\n",
+				__func__, pool->name, &dma);
 			return;
 		}
 	}
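With the first hunk in place, a pool created without a device fails fast at dma_pool_create() time instead of oopsing later; the remaining hunks simply drop the pool->dev == NULL branches that the new check makes unreachable. A tiny hypothetical caller (not from the patch) showing the fail-fast behaviour:

	/* Hypothetical: creating a pool without a device is rejected up front. */
	struct dma_pool *bad = dma_pool_create("no-dev-pool", NULL, 64, 8, 0);
	if (!bad)
		pr_warn("dma_pool_create() refused a NULL device\n");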