Revert "ALSA: memalloc: Convert x86 SG-buffer handling with non-contiguous type"

This reverts commit 2d9ea39917.

We've got a regression report showing that the audio got broken on the
device over AMD IOMMU. The conversion assumed the wrong pointer / page
mapping for the indirect mapping case, and we need to correct this
urgently, so let's revert it for now.

Fixes: 2d9ea39917 ("ALSA: memalloc: Convert x86 SG-buffer handling with non-contiguous type")
Reported-and-tested-by: Alex Xu (Hello71) <alex_y_xu@yahoo.ca>
Link: https://lore.kernel.org/r/20211104180846.16340-1-tiwai@suse.de
Signed-off-by: Takashi Iwai <tiwai@suse.de>
commit 7599acb7b9
parent 2a5bb69448
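For context on why the conversion broke: with a non-contiguous allocation, the CPU-visible vmap of the buffer and the device-visible DMA addresses live in unrelated spaces once an IOMMU (an indirect mapping) sits in between, so per-page lookups must go through the allocation's sg_table rather than assuming any linear correspondence. Below is a minimal sketch of that distinction using the generic dma_alloc_noncontiguous() / dma_vmap_noncontiguous() API; the helper name alloc_and_walk() and the dev_dbg() output are illustrative and not part of the reverted code.

/*
 * Illustration only (not part of this patch): walking a non-contiguous
 * DMA allocation correctly.  alloc_and_walk() is a hypothetical helper.
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/scatterlist.h>

static void *alloc_and_walk(struct device *dev, size_t size,
			    struct sg_table **sgt_ret)
{
	struct sg_table *sgt;
	struct scatterlist *sg;
	void *vaddr;
	int i;

	sgt = dma_alloc_noncontiguous(dev, size, DMA_BIDIRECTIONAL,
				      GFP_KERNEL, 0);
	if (!sgt)
		return NULL;

	/* CPU view: one contiguous vmap over possibly scattered pages */
	vaddr = dma_vmap_noncontiguous(dev, size, sgt);
	if (!vaddr) {
		dma_free_noncontiguous(dev, size, sgt, DMA_BIDIRECTIONAL);
		return NULL;
	}

	/*
	 * Device view: DMA addresses must be read from the sg_table.
	 * Behind an IOMMU (indirect mapping), sg_dma_address() has no
	 * fixed relation to the pages backing vaddr, so deriving one
	 * view from the other by offset arithmetic is unsafe.
	 */
	for_each_sgtable_dma_sg(sgt, sg, i)
		dev_dbg(dev, "chunk %d: dma %pad len %u\n",
			i, &sg_dma_address(sg), sg_dma_len(sg));

	*sgt_ret = sgt;
	return vaddr;
}

The reverted WC-SG ops instead reused the vmalloc helpers (visible in the memalloc.c hunk below), which resolve pages and addresses from the CPU mapping; that appears to be exactly the "wrong pointer / page mapping" assumption the message calls out, since it only holds when the device sees memory through the direct mapping.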
diff --git a/include/sound/memalloc.h b/include/sound/memalloc.h
@@ -36,6 +36,13 @@ struct snd_dma_device {
 #define SNDRV_DMA_TYPE_CONTINUOUS	1	/* continuous no-DMA memory */
 #define SNDRV_DMA_TYPE_DEV		2	/* generic device continuous */
 #define SNDRV_DMA_TYPE_DEV_WC		5	/* continuous write-combined */
+#ifdef CONFIG_SND_DMA_SGBUF
+#define SNDRV_DMA_TYPE_DEV_SG		3	/* generic device SG-buffer */
+#define SNDRV_DMA_TYPE_DEV_WC_SG	6	/* SG write-combined */
+#else
+#define SNDRV_DMA_TYPE_DEV_SG		SNDRV_DMA_TYPE_DEV /* no SG-buf support */
+#define SNDRV_DMA_TYPE_DEV_WC_SG	SNDRV_DMA_TYPE_DEV_WC
+#endif
 #ifdef CONFIG_GENERIC_ALLOCATOR
 #define SNDRV_DMA_TYPE_DEV_IRAM	4	/* generic device iram-buffer */
 #else
@@ -44,13 +51,6 @@ struct snd_dma_device {
 #define SNDRV_DMA_TYPE_VMALLOC		7	/* vmalloc'ed buffer */
 #define SNDRV_DMA_TYPE_NONCONTIG	8	/* non-coherent SG buffer */
 #define SNDRV_DMA_TYPE_NONCOHERENT	9	/* non-coherent buffer */
-#ifdef CONFIG_SND_DMA_SGBUF
-#define SNDRV_DMA_TYPE_DEV_SG		SNDRV_DMA_TYPE_NONCONTIG
-#define SNDRV_DMA_TYPE_DEV_WC_SG	6	/* SG write-combined */
-#else
-#define SNDRV_DMA_TYPE_DEV_SG		SNDRV_DMA_TYPE_DEV /* no SG-buf support */
-#define SNDRV_DMA_TYPE_DEV_WC_SG	SNDRV_DMA_TYPE_DEV_WC
-#endif
 
 /*
  * info for buffer allocation
diff --git a/sound/core/Makefile b/sound/core/Makefile
@@ -19,6 +19,7 @@ snd-$(CONFIG_SND_JACK) += ctljack.o jack.o
 snd-pcm-y := pcm.o pcm_native.o pcm_lib.o pcm_misc.o \
 		pcm_memory.o memalloc.o
 snd-pcm-$(CONFIG_SND_PCM_TIMER) += pcm_timer.o
+snd-pcm-$(CONFIG_SND_DMA_SGBUF) += sgbuf.o
 snd-pcm-$(CONFIG_SND_PCM_ELD) += pcm_drm_eld.o
 snd-pcm-$(CONFIG_SND_PCM_IEC958) += pcm_iec958.o
 
diff --git a/sound/core/memalloc.c b/sound/core/memalloc.c
@@ -560,50 +560,6 @@ static const struct snd_malloc_ops snd_dma_noncontig_ops = {
 	.get_chunk_size = snd_dma_vmalloc_get_chunk_size,
 };
 
-/* x86-specific SG-buffer with WC pages */
-#ifdef CONFIG_SND_DMA_SGBUF
-#define vmalloc_to_virt(v) (unsigned long)page_to_virt(vmalloc_to_page(v))
-
-static void *snd_dma_sg_wc_alloc(struct snd_dma_buffer *dmab, size_t size)
-{
-	void *p = snd_dma_noncontig_alloc(dmab, size);
-	size_t ofs;
-
-	if (!p)
-		return NULL;
-	for (ofs = 0; ofs < size; ofs += PAGE_SIZE)
-		set_memory_uc(vmalloc_to_virt(p + ofs), 1);
-	return p;
-}
-
-static void snd_dma_sg_wc_free(struct snd_dma_buffer *dmab)
-{
-	size_t ofs;
-
-	for (ofs = 0; ofs < dmab->bytes; ofs += PAGE_SIZE)
-		set_memory_wb(vmalloc_to_virt(dmab->area + ofs), 1);
-	snd_dma_noncontig_free(dmab);
-}
-
-static int snd_dma_sg_wc_mmap(struct snd_dma_buffer *dmab,
-			      struct vm_area_struct *area)
-{
-	area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
-	/* FIXME: dma_mmap_noncontiguous() works? */
-	return -ENOENT; /* continue with the default mmap handler */
-}
-
-const struct snd_malloc_ops snd_dma_sg_wc_ops = {
-	.alloc = snd_dma_sg_wc_alloc,
-	.free = snd_dma_sg_wc_free,
-	.mmap = snd_dma_sg_wc_mmap,
-	.sync = snd_dma_noncontig_sync,
-	.get_addr = snd_dma_vmalloc_get_addr,
-	.get_page = snd_dma_vmalloc_get_page,
-	.get_chunk_size = snd_dma_vmalloc_get_chunk_size,
-};
-#endif /* CONFIG_SND_DMA_SGBUF */
-
 /*
  * Non-coherent pages allocator
  */
@@ -663,13 +619,14 @@ static const struct snd_malloc_ops *dma_ops[] = {
 	[SNDRV_DMA_TYPE_DEV_WC] = &snd_dma_wc_ops,
 	[SNDRV_DMA_TYPE_NONCONTIG] = &snd_dma_noncontig_ops,
 	[SNDRV_DMA_TYPE_NONCOHERENT] = &snd_dma_noncoherent_ops,
-#ifdef CONFIG_SND_DMA_SGBUF
-	[SNDRV_DMA_TYPE_DEV_WC_SG] = &snd_dma_sg_wc_ops,
-#endif
 #ifdef CONFIG_GENERIC_ALLOCATOR
 	[SNDRV_DMA_TYPE_DEV_IRAM] = &snd_dma_iram_ops,
 #endif /* CONFIG_GENERIC_ALLOCATOR */
 #endif /* CONFIG_HAS_DMA */
+#ifdef CONFIG_SND_DMA_SGBUF
+	[SNDRV_DMA_TYPE_DEV_SG] = &snd_dma_sg_ops,
+	[SNDRV_DMA_TYPE_DEV_WC_SG] = &snd_dma_sg_ops,
+#endif
 };
 
 static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab)
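With both SG types now pointing at snd_dma_sg_ops, dispatch stays a plain table lookup keyed by dmab->dev.type. For reference, a sketch of the accessor whose signature appears as context above, assuming the usual NULL and bounds guards (the exact WARN_ON_ONCE checks are reconstructed from memory, not part of this diff):

static const struct snd_malloc_ops *snd_dma_get_ops(struct snd_dma_buffer *dmab)
{
	if (WARN_ON_ONCE(!dmab))
		return NULL;
	if (WARN_ON_ONCE(dmab->dev.type <= SNDRV_DMA_TYPE_UNKNOWN ||
			 dmab->dev.type >= ARRAY_SIZE(dma_ops)))
		return NULL;
	return dma_ops[dmab->dev.type];
}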
diff --git a/sound/core/sgbuf.c b/sound/core/sgbuf.c
new file mode 100644
@@ -0,0 +1,201 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Scatter-Gather buffer
+ *
+ *  Copyright (c) by Takashi Iwai <tiwai@suse.de>
+ */
+
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+#include <linux/export.h>
+#include <sound/memalloc.h>
+#include "memalloc_local.h"
+
+struct snd_sg_page {
+	void *buf;
+	dma_addr_t addr;
+};
+
+struct snd_sg_buf {
+	int size;	/* allocated byte size */
+	int pages;	/* allocated pages */
+	int tblsize;	/* allocated table size */
+	struct snd_sg_page *table;	/* address table */
+	struct page **page_table;	/* page table (for vmap/vunmap) */
+	struct device *dev;
+};
+
+/* table entries are align to 32 */
+#define SGBUF_TBL_ALIGN		32
+#define sgbuf_align_table(tbl)	ALIGN((tbl), SGBUF_TBL_ALIGN)
+
+static void snd_dma_sg_free(struct snd_dma_buffer *dmab)
+{
+	struct snd_sg_buf *sgbuf = dmab->private_data;
+	struct snd_dma_buffer tmpb;
+	int i;
+
+	if (!sgbuf)
+		return;
+
+	vunmap(dmab->area);
+	dmab->area = NULL;
+
+	tmpb.dev.type = SNDRV_DMA_TYPE_DEV;
+	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG)
+		tmpb.dev.type = SNDRV_DMA_TYPE_DEV_WC;
+	tmpb.dev.dev = sgbuf->dev;
+	for (i = 0; i < sgbuf->pages; i++) {
+		if (!(sgbuf->table[i].addr & ~PAGE_MASK))
+			continue; /* continuous pages */
+		tmpb.area = sgbuf->table[i].buf;
+		tmpb.addr = sgbuf->table[i].addr & PAGE_MASK;
+		tmpb.bytes = (sgbuf->table[i].addr & ~PAGE_MASK) << PAGE_SHIFT;
+		snd_dma_free_pages(&tmpb);
+	}
+
+	kfree(sgbuf->table);
+	kfree(sgbuf->page_table);
+	kfree(sgbuf);
+	dmab->private_data = NULL;
+}
+
+#define MAX_ALLOC_PAGES		32
+
+static void *snd_dma_sg_alloc(struct snd_dma_buffer *dmab, size_t size)
+{
+	struct snd_sg_buf *sgbuf;
+	unsigned int i, pages, chunk, maxpages;
+	struct snd_dma_buffer tmpb;
+	struct snd_sg_page *table;
+	struct page **pgtable;
+	int type = SNDRV_DMA_TYPE_DEV;
+	pgprot_t prot = PAGE_KERNEL;
+	void *area;
+
+	dmab->private_data = sgbuf = kzalloc(sizeof(*sgbuf), GFP_KERNEL);
+	if (!sgbuf)
+		return NULL;
+	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG) {
+		type = SNDRV_DMA_TYPE_DEV_WC;
+#ifdef pgprot_noncached
+		prot = pgprot_noncached(PAGE_KERNEL);
+#endif
+	}
+	sgbuf->dev = dmab->dev.dev;
+	pages = snd_sgbuf_aligned_pages(size);
+	sgbuf->tblsize = sgbuf_align_table(pages);
+	table = kcalloc(sgbuf->tblsize, sizeof(*table), GFP_KERNEL);
+	if (!table)
+		goto _failed;
+	sgbuf->table = table;
+	pgtable = kcalloc(sgbuf->tblsize, sizeof(*pgtable), GFP_KERNEL);
+	if (!pgtable)
+		goto _failed;
+	sgbuf->page_table = pgtable;
+
+	/* allocate pages */
+	maxpages = MAX_ALLOC_PAGES;
+	while (pages > 0) {
+		chunk = pages;
+		/* don't be too eager to take a huge chunk */
+		if (chunk > maxpages)
+			chunk = maxpages;
+		chunk <<= PAGE_SHIFT;
+		if (snd_dma_alloc_pages_fallback(type, dmab->dev.dev,
+						 chunk, &tmpb) < 0) {
+			if (!sgbuf->pages)
+				goto _failed;
+			size = sgbuf->pages * PAGE_SIZE;
+			break;
+		}
+		chunk = tmpb.bytes >> PAGE_SHIFT;
+		for (i = 0; i < chunk; i++) {
+			table->buf = tmpb.area;
+			table->addr = tmpb.addr;
+			if (!i)
+				table->addr |= chunk; /* mark head */
+			table++;
+			*pgtable++ = virt_to_page(tmpb.area);
+			tmpb.area += PAGE_SIZE;
+			tmpb.addr += PAGE_SIZE;
+		}
+		sgbuf->pages += chunk;
+		pages -= chunk;
+		if (chunk < maxpages)
+			maxpages = chunk;
+	}
+
+	sgbuf->size = size;
+	area = vmap(sgbuf->page_table, sgbuf->pages, VM_MAP, prot);
+	if (!area)
+		goto _failed;
+	return area;
+
+ _failed:
+	snd_dma_sg_free(dmab); /* free the table */
+	return NULL;
+}
+
+static dma_addr_t snd_dma_sg_get_addr(struct snd_dma_buffer *dmab,
+				      size_t offset)
+{
+	struct snd_sg_buf *sgbuf = dmab->private_data;
+	dma_addr_t addr;
+
+	addr = sgbuf->table[offset >> PAGE_SHIFT].addr;
+	addr &= ~((dma_addr_t)PAGE_SIZE - 1);
+	return addr + offset % PAGE_SIZE;
+}
+
+static struct page *snd_dma_sg_get_page(struct snd_dma_buffer *dmab,
+					size_t offset)
+{
+	struct snd_sg_buf *sgbuf = dmab->private_data;
+	unsigned int idx = offset >> PAGE_SHIFT;
+
+	if (idx >= (unsigned int)sgbuf->pages)
+		return NULL;
+	return sgbuf->page_table[idx];
+}
+
+static unsigned int snd_dma_sg_get_chunk_size(struct snd_dma_buffer *dmab,
+					      unsigned int ofs,
+					      unsigned int size)
+{
+	struct snd_sg_buf *sg = dmab->private_data;
+	unsigned int start, end, pg;
+
+	start = ofs >> PAGE_SHIFT;
+	end = (ofs + size - 1) >> PAGE_SHIFT;
+	/* check page continuity */
+	pg = sg->table[start].addr >> PAGE_SHIFT;
+	for (;;) {
+		start++;
+		if (start > end)
+			break;
+		pg++;
+		if ((sg->table[start].addr >> PAGE_SHIFT) != pg)
+			return (start << PAGE_SHIFT) - ofs;
+	}
+	/* ok, all on continuous pages */
+	return size;
+}
+
+static int snd_dma_sg_mmap(struct snd_dma_buffer *dmab,
+			   struct vm_area_struct *area)
+{
+	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_WC_SG)
+		area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
+	return -ENOENT; /* continue with the default mmap handler */
+}
+
+const struct snd_malloc_ops snd_dma_sg_ops = {
+	.alloc = snd_dma_sg_alloc,
+	.free = snd_dma_sg_free,
+	.get_addr = snd_dma_sg_get_addr,
+	.get_page = snd_dma_sg_get_page,
+	.get_chunk_size = snd_dma_sg_get_chunk_size,
+	.mmap = snd_dma_sg_mmap,
+};
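One detail of the restored allocator that is easy to miss: snd_dma_sg_alloc() stores each chunk's page count in the low bits of the first page's table[].addr ("mark head"), bits that are otherwise zero because the DMA address is page-aligned, and snd_dma_sg_free() later recovers the chunk boundaries from them. A stand-alone sketch of that encoding, in userspace C with hypothetical values and PAGE_SHIFT fixed at 12 purely for illustration:

#include <stdint.h>
#include <stdio.h>

#define DEMO_PAGE_SHIFT 12
#define DEMO_PAGE_MASK  (~(((uint64_t)1 << DEMO_PAGE_SHIFT) - 1))

int main(void)
{
	uint64_t dma_addr = 0x1f40000;	/* page-aligned start of a chunk */
	uint64_t npages = 8;		/* chunk length in pages */

	/* encode: the low PAGE_SHIFT bits are free since dma_addr is aligned */
	uint64_t head = dma_addr | npages;

	/* decode, mirroring what snd_dma_sg_free() does */
	uint64_t addr = head & DEMO_PAGE_MASK;
	uint64_t pages = head & ~DEMO_PAGE_MASK;

	printf("addr=%#llx pages=%llu bytes=%llu\n",
	       (unsigned long long)addr,
	       (unsigned long long)pages,
	       (unsigned long long)(pages << DEMO_PAGE_SHIFT));
	return 0;
}

This is also why snd_dma_sg_free() skips entries whose low bits are zero ("continuous pages"): only chunk heads carry a count, and the remaining pages of a chunk are freed together with their head.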