Merge tag 'dma-mapping-6.1-2022-10-10' of git://git.infradead.org/users/hch/dma-mapping

Pull dma-mapping updates from Christoph Hellwig:

 - fix a regression in the ARM dma-direct conversion (Christoph Hellwig)

 - use memcpy_{from,to}_page (Fabio M. De Francesco)

 - cleanup the swiotlb MAINTAINERS entry (Lukas Bulwahn)

 - make SG table pool allocation less fragile (Masahiro Yamada)

 - don't panic on swiotlb initialization failure (Robin Murphy)

* tag 'dma-mapping-6.1-2022-10-10' of git://git.infradead.org/users/hch/dma-mapping:
  ARM/dma-mapping: remove the dma_coherent member of struct dev_archdata
  ARM/dma-mapping: don't override ->dma_coherent when set from a bus notifier
  lib/sg_pool: change module_init(sg_pool_init) to subsys_initcall
  MAINTAINERS: merge SWIOTLB SUBSYSTEM into DMA MAPPING HELPERS
  swiotlb: don't panic!
  swiotlb: replace kmap_atomic() with memcpy_{from,to}_page()
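
For context on the sg_pool item: per its title, the fix promotes sg_pool_init() from a device-level initcall to subsys_initcall, so the SG table mempools exist before any built-in user (e.g. SCSI) starts probing, instead of depending on link order. The core of that change in lib/sg_pool.c:

-module_init(sg_pool_init);
+subsys_initcall(sg_pool_init);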
5 changed files with 38 additions and 46 deletions

kernel/dma/swiotlb.c

@@ -346,22 +346,27 @@ retry:
 		memblock_free(tlb, PAGE_ALIGN(bytes));
 
 		nslabs = ALIGN(nslabs >> 1, IO_TLB_SEGSIZE);
-		if (nslabs < IO_TLB_MIN_SLABS)
-			panic("%s: Failed to remap %zu bytes\n",
-			      __func__, bytes);
-		goto retry;
+		if (nslabs >= IO_TLB_MIN_SLABS)
+			goto retry;
+
+		pr_warn("%s: Failed to remap %zu bytes\n", __func__, bytes);
+		return;
 	}
 
 	alloc_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), nslabs));
 	mem->slots = memblock_alloc(alloc_size, PAGE_SIZE);
-	if (!mem->slots)
-		panic("%s: Failed to allocate %zu bytes align=0x%lx\n",
-		      __func__, alloc_size, PAGE_SIZE);
+	if (!mem->slots) {
+		pr_warn("%s: Failed to allocate %zu bytes align=0x%lx\n",
+			__func__, alloc_size, PAGE_SIZE);
+		return;
+	}
 
 	mem->areas = memblock_alloc(array_size(sizeof(struct io_tlb_area),
 		default_nareas), SMP_CACHE_BYTES);
-	if (!mem->areas)
-		panic("%s: Failed to allocate mem->areas.\n", __func__);
+	if (!mem->areas) {
+		pr_warn("%s: Failed to allocate mem->areas.\n", __func__);
+		return;
+	}
 
 	swiotlb_init_io_tlb_mem(mem, __pa(tlb), nslabs, flags, false,
 				default_nareas);
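
Because initialization failure is now survivable, mem->nslabs simply stays zero and the system boots without a bounce pool. A driver that strictly requires bouncing can probe for this up front; a minimal sketch, assuming a struct device *dev and the is_swiotlb_active() helper from <linux/swiotlb.h> (check_bounce_buffer() is a hypothetical wrapper):

#include <linux/swiotlb.h>

/* Hypothetical sketch: fail a probe early if no bounce pool was set
 * up at boot, instead of hitting a mapping error later on. */
static int check_bounce_buffer(struct device *dev)
{
	if (!is_swiotlb_active(dev))
		return -ENOMEM;
	return 0;
}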
@@ -545,9 +550,8 @@ static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size
 	}
 
 	if (PageHighMem(pfn_to_page(pfn))) {
-		/* The buffer does not have a mapping.  Map it in and copy */
 		unsigned int offset = orig_addr & ~PAGE_MASK;
-		char *buffer;
+		struct page *page;
 		unsigned int sz = 0;
 		unsigned long flags;
 
@@ -555,12 +559,11 @@ static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size
 			sz = min_t(size_t, PAGE_SIZE - offset, size);
 
 			local_irq_save(flags);
-			buffer = kmap_atomic(pfn_to_page(pfn));
+			page = pfn_to_page(pfn);
 			if (dir == DMA_TO_DEVICE)
-				memcpy(vaddr, buffer + offset, sz);
+				memcpy_from_page(vaddr, page, offset, sz);
 			else
-				memcpy(buffer + offset, vaddr, sz);
-			kunmap_atomic(buffer);
+				memcpy_to_page(page, offset, vaddr, sz);
 			local_irq_restore(flags);
 
 			size -= sz;
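
The memcpy_{from,to}_page() helpers from <linux/highmem.h> fold the map/copy/unmap sequence into one call, which is why the explicit kunmap_atomic() pairing disappears above; the swiotlb code keeps its own local_irq_save() around the copy, so nothing is lost there. Roughly, memcpy_from_page() behaves like this sketch (simplified; the real helper also has a VM_BUG_ON() bounds check):

#include <linux/highmem.h>

/* Simplified model of memcpy_from_page(): map the page with a local
 * (per-CPU, nestable) mapping, copy out, unmap. memcpy_to_page() is
 * the mirror image for the other direction. */
static inline void memcpy_from_page_sketch(char *to, struct page *page,
					   size_t offset, size_t len)
{
	char *from = kmap_local_page(page);

	memcpy(to, from + offset, len);
	kunmap_local(from);
}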
@@ -731,8 +734,11 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
 	int index;
 	phys_addr_t tlb_addr;
 
-	if (!mem || !mem->nslabs)
-		panic("Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer");
+	if (!mem || !mem->nslabs) {
+		dev_warn_ratelimited(dev,
+			"Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer");
+		return (phys_addr_t)DMA_MAPPING_ERROR;
+	}
 
 	if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
 		pr_warn_once("Memory encryption is active and system is using DMA bounce buffers\n");
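
With swiotlb_tbl_map_single() reporting DMA_MAPPING_ERROR instead of panicking, the failure surfaces to drivers through the normal DMA API. A minimal sketch of the check every mapping caller is expected to make anyway (map_for_device() and its parameters are hypothetical):

#include <linux/dma-mapping.h>

/* Hypothetical sketch: an exhausted or absent bounce pool now shows
 * up as a mapping error the driver can handle, rather than a panic. */
static int map_for_device(struct device *dev, void *buf, size_t len,
			  dma_addr_t *dma)
{
	dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, addr))
		return -ENOMEM;	/* e.g. fall back to a smaller transfer */
	*dma = addr;
	return 0;
}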