x86/dma: Remove dma_alloc_coherent_mask()
These days all devices (including the ISA fallback device) have a coherent
DMA mask set, so remove the workaround.

Tested-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: Joerg Roedel <joro@8bytes.org>
Cc: Jon Mason <jdmason@kudzu.us>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Muli Ben-Yehuda <mulix@mulix.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: iommu@lists.linux-foundation.org
Link: http://lkml.kernel.org/r/20180319103826.12853-3-hch@lst.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 3eb93ea327
commit 038d07a283
arch/x86/include/asm/dma-mapping.h
@@ -44,26 +44,12 @@ extern void dma_generic_free_coherent(struct device *dev, size_t size,
 			      void *vaddr, dma_addr_t dma_addr,
 			      unsigned long attrs);
 
-static inline unsigned long dma_alloc_coherent_mask(struct device *dev,
-						    gfp_t gfp)
-{
-	unsigned long dma_mask = 0;
-
-	dma_mask = dev->coherent_dma_mask;
-	if (!dma_mask)
-		dma_mask = (gfp & GFP_DMA) ? DMA_BIT_MASK(24) : DMA_BIT_MASK(32);
-
-	return dma_mask;
-}
-
 static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
 {
-	unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp);
-
-	if (dma_mask <= DMA_BIT_MASK(24))
+	if (dev->coherent_dma_mask <= DMA_BIT_MASK(24))
 		gfp |= GFP_DMA;
 #ifdef CONFIG_X86_64
-	if (dma_mask <= DMA_BIT_MASK(32) && !(gfp & GFP_DMA))
+	if (dev->coherent_dma_mask <= DMA_BIT_MASK(32) && !(gfp & GFP_DMA))
 		gfp |= GFP_DMA32;
 #endif
 	return gfp;
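To see why the two versions behave the same, note that the deleted helper
only diverged from a plain read of dev->coherent_dma_mask when the mask was
zero. A minimal userspace sketch of both paths (the stub struct device and
the helper name old_coherent_mask are invented for this demo, not kernel
code):

	#include <stdio.h>

	/* Stub standing in for the kernel's struct device; only the one
	 * field this commit cares about is modeled. */
	struct device {
		unsigned long long coherent_dma_mask;
	};

	#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))
	#define GFP_DMA		0x01u

	/* Models the deleted helper: guess a mask when none is set. */
	static unsigned long long old_coherent_mask(struct device *dev,
						    unsigned gfp)
	{
		unsigned long long dma_mask = dev->coherent_dma_mask;

		if (!dma_mask)
			dma_mask = (gfp & GFP_DMA) ? DMA_BIT_MASK(24)
						   : DMA_BIT_MASK(32);
		return dma_mask;
	}

	int main(void)
	{
		/* Once every device has its mask initialized, the fallback
		 * branch is dead and both expressions agree. */
		struct device dev = { .coherent_dma_mask = DMA_BIT_MASK(32) };

		printf("old helper:  %#llx\n", old_coherent_mask(&dev, 0));
		printf("direct read: %#llx\n", dev.coherent_dma_mask);
		return 0;
	}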
arch/x86/kernel/pci-dma.c
@@ -80,13 +80,10 @@ void *dma_generic_alloc_coherent(struct device *dev, size_t size,
 				 dma_addr_t *dma_addr, gfp_t flag,
 				 unsigned long attrs)
 {
-	unsigned long dma_mask;
 	struct page *page;
 	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
 	dma_addr_t addr;
 
-	dma_mask = dma_alloc_coherent_mask(dev, flag);
-
 again:
 	page = NULL;
 	/* CMA can be used only in the context which permits sleeping */
@@ -95,7 +92,7 @@ again:
 					 flag);
 	if (page) {
 		addr = phys_to_dma(dev, page_to_phys(page));
-		if (addr + size > dma_mask) {
+		if (addr + size > dev->coherent_dma_mask) {
 			dma_release_from_contiguous(dev, page, count);
 			page = NULL;
 		}
@@ -108,10 +105,11 @@ again:
 		return NULL;
 
 	addr = phys_to_dma(dev, page_to_phys(page));
-	if (addr + size > dma_mask) {
+	if (addr + size > dev->coherent_dma_mask) {
 		__free_pages(page, get_order(size));
 
-		if (dma_mask < DMA_BIT_MASK(32) && !(flag & GFP_DMA)) {
+		if (dev->coherent_dma_mask < DMA_BIT_MASK(32) &&
+		    !(flag & GFP_DMA)) {
 			flag = (flag & ~GFP_DMA32) | GFP_DMA;
 			goto again;
 		}
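The surrounding allocator follows an allocate, bounds-check,
retry-from-a-stricter-zone loop; the commit only changes which variable the
bounds check reads. A self-contained model of that loop (fake_alloc and the
addresses it returns are invented purely for illustration):

	#include <stdio.h>

	#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))
	#define GFP_DMA		0x01u
	#define GFP_DMA32	0x02u

	/* Pretend the first attempt lands at 5 GiB and a GFP_DMA retry
	 * lands at 1 MiB. */
	static unsigned long long fake_alloc(unsigned flags)
	{
		return (flags & GFP_DMA) ? 0x100000ULL : 0x140000000ULL;
	}

	int main(void)
	{
		unsigned long long coherent_dma_mask = DMA_BIT_MASK(24);
		unsigned long long size = 4096;
		unsigned flags = 0;
		unsigned long long addr;

	again:
		addr = fake_alloc(flags);
		if (addr + size > coherent_dma_mask) {
			/* free the page here, then retry from ZONE_DMA,
			 * mirroring the ~GFP_DMA32 | GFP_DMA dance in the
			 * hunk above */
			if (coherent_dma_mask < DMA_BIT_MASK(32) &&
			    !(flags & GFP_DMA)) {
				flags = (flags & ~GFP_DMA32) | GFP_DMA;
				goto again;
			}
			fprintf(stderr, "allocation does not fit the mask\n");
			return 1;
		}
		printf("allocated at %#llx\n", addr);
		return 0;
	}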
arch/x86/mm/mem_encrypt.c
@@ -198,12 +198,10 @@ void __init sme_early_init(void)
 static void *sev_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
 		       gfp_t gfp, unsigned long attrs)
 {
-	unsigned long dma_mask;
 	unsigned int order;
 	struct page *page;
 	void *vaddr = NULL;
 
-	dma_mask = dma_alloc_coherent_mask(dev, gfp);
 	order = get_order(size);
 
 	/*
@@ -221,7 +219,7 @@ static void *sev_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
 	 * mask with it already cleared.
 	 */
 	addr = __sme_clr(phys_to_dma(dev, page_to_phys(page)));
-	if ((addr + size) > dma_mask) {
+	if ((addr + size) > dev->coherent_dma_mask) {
 		__free_pages(page, get_order(size));
 	} else {
 		vaddr = page_address(page);
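The SEV path strips the memory-encryption bit before comparing against the
coherent mask, because the address the device sees has that bit clear. A toy
illustration of why the order matters; the bit position here is an assumption
made for the demo, not the real sme_me_mask:

	#include <stdio.h>

	#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))
	/* Assumed encryption-bit position, purely for illustration. */
	#define FAKE_SME_MASK	(1ULL << 47)

	/* Models __sme_clr(): drop the encryption bit from an address. */
	static unsigned long long sme_clr(unsigned long long addr)
	{
		return addr & ~FAKE_SME_MASK;
	}

	int main(void)
	{
		unsigned long long coherent_dma_mask = DMA_BIT_MASK(32);
		unsigned long long addr = FAKE_SME_MASK | 0x1000ULL;

		/* Without the clear, every encrypted address would look
		 * like it overflows a 32-bit coherent mask. */
		printf("raw bit set: %s\n",
		       addr > coherent_dma_mask ? "out of range" : "fits");
		printf("bit cleared: %s\n",
		       sme_clr(addr) > coherent_dma_mask ? "out of range"
							 : "fits");
		return 0;
	}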
drivers/xen/swiotlb-xen.c
@@ -53,20 +53,6 @@
  * API.
  */
 
-#ifndef CONFIG_X86
-static unsigned long dma_alloc_coherent_mask(struct device *dev,
-					    gfp_t gfp)
-{
-	unsigned long dma_mask = 0;
-
-	dma_mask = dev->coherent_dma_mask;
-	if (!dma_mask)
-		dma_mask = (gfp & GFP_DMA) ? DMA_BIT_MASK(24) : DMA_BIT_MASK(32);
-
-	return dma_mask;
-}
-#endif
-
 #define XEN_SWIOTLB_ERROR_CODE	(~(dma_addr_t)0x0)
 
 static char *xen_io_tlb_start, *xen_io_tlb_end;
@@ -328,7 +314,7 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 		return ret;
 
 	if (hwdev && hwdev->coherent_dma_mask)
-		dma_mask = dma_alloc_coherent_mask(hwdev, flags);
+		dma_mask = hwdev->coherent_dma_mask;
 
 	/* At this point dma_handle is the physical address, next we are
 	 * going to set it to the machine address.
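Worth noting on the Xen side: swiotlb-xen carried its own #ifndef CONFIG_X86
copy of the helper so the file could build on non-x86 architectures, where
the x86 header version was unavailable. Reading hwdev->coherent_dma_mask
directly works on every architecture, which is presumably why the entire
guarded block could be deleted rather than adjusted.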