dma-mapping: Remove dma_check_mask()

sme_active() is an x86-specific function, so it's better not to call it from
generic code. Christoph Hellwig noted that "There is no reason why we should
have a special debug printk just for one specific reason why there is a
requirement for a large DMA mask", so just remove dma_check_mask().

Signed-off-by: Thiago Jung Bauermann <bauerman@linux.ibm.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20190806044919.10622-4-bauerman@linux.ibm.com
commit e740815a97
parent 47e5d8f9ed
Author:    Thiago Jung Bauermann <bauerman@linux.ibm.com>
Date:      2019-08-06 01:49:16 -03:00
Committer: Michael Ellerman <mpe@ellerman.id.au>
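
For reference, the helper being removed is reproduced below with comments
added for this write-up (they are not in the original source) to spell out
what it checked. sme_active() and sme_get_me_mask() are x86-specific SME
helpers, which is exactly why the check does not belong in generic DMA code.

/*
 * Annotated copy of the removed helper; comments added here only.
 */
static inline void dma_check_mask(struct device *dev, u64 mask)
{
	/*
	 * sme_get_me_mask() is the SME encryption bit (the "C-bit"), e.g.
	 * 1ULL << 47.  Shifting it left once and subtracting one produces a
	 * mask covering every address bit up to and including the C-bit, so
	 * the comparison asks: can this device address encrypted memory?
	 * If not, its DMA will have to bounce through SWIOTLB.
	 */
	if (sme_active() && (mask < (((u64)sme_get_me_mask() << 1) - 1)))
		dev_warn(dev, "SME is active, device will require DMA bounce buffers\n");
}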

--- a/kernel/dma/mapping.c
+++ b/kernel/dma/mapping.c
@@ -291,12 +291,6 @@ void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
 }
 EXPORT_SYMBOL(dma_free_attrs);
 
-static inline void dma_check_mask(struct device *dev, u64 mask)
-{
-	if (sme_active() && (mask < (((u64)sme_get_me_mask() << 1) - 1)))
-		dev_warn(dev, "SME is active, device will require DMA bounce buffers\n");
-}
-
 int dma_supported(struct device *dev, u64 mask)
 {
 	const struct dma_map_ops *ops = get_dma_ops(dev);
@@ -327,7 +321,6 @@ int dma_set_mask(struct device *dev, u64 mask)
 		return -EIO;
 
 	arch_dma_set_mask(dev, mask);
-	dma_check_mask(dev, mask);
 	*dev->dma_mask = mask;
 	return 0;
 }
@@ -345,7 +338,6 @@ int dma_set_coherent_mask(struct device *dev, u64 mask)
 	if (!dma_supported(dev, mask))
 		return -EIO;
 
-	dma_check_mask(dev, mask);
 	dev->coherent_dma_mask = mask;
 	return 0;
 }
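
With the warning gone from the core, a driver that cares can still notice a
constrained mask itself without touching SME internals. A minimal sketch,
assuming dma_addressing_limited() is available (it appeared in kernels of
roughly this vintage); the probe function and message below are hypothetical:

#include <linux/device.h>
#include <linux/dma-mapping.h>

/* Hypothetical probe path: set a 32-bit mask and note if it is limiting. */
static int example_probe(struct device *dev)
{
	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
		return -EIO;

	/*
	 * dma_addressing_limited() reports whether the device's DMA mask is
	 * too small to reach all memory the kernel may hand it, in which
	 * case streaming DMA may be bounced through SWIOTLB.
	 */
	if (dma_addressing_limited(dev))
		dev_warn(dev, "DMA mask is limited; bounce buffering may be used\n");

	return 0;
}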