# SPDX-License-Identifier: GPL-2.0-only

config NO_DMA
bool

config HAS_DMA
bool
depends on !NO_DMA
default y

config DMA_OPS
	depends on HAS_DMA
	bool

#
# IOMMU drivers that can bypass the IOMMU code and optionally use the direct
# mapping fast path should select this option and set the dma_ops_bypass
# flag in struct device where applicable
#
config DMA_OPS_BYPASS
bool
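
# As an illustrative sketch (not from this file): a driver for an IOMMU that
# selects DMA_OPS_BYPASS would mark devices that can use the direct mapping
# with
#	dev->dma_ops_bypass = true;
# so the core can take the direct-mapping fast path for them.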

# Lets the platform IOMMU driver choose between bypass and IOMMU
config ARCH_HAS_DMA_MAP_DIRECT
bool

config NEED_SG_DMA_FLAGS
bool

config NEED_SG_DMA_LENGTH
	bool

config NEED_DMA_MAP_STATE
	bool

config ARCH_DMA_ADDR_T_64BIT
def_bool 64BIT || PHYS_ADDR_T_64BIT

config ARCH_HAS_DMA_SET_MASK
bool

#
# Select this option if the architecture needs special handling for
# DMA_ATTR_WRITE_COMBINE. Normally the "uncached" mapping should be what
# people think of when saying write combine, so very few platforms should
# need to enable this.
#
config ARCH_HAS_DMA_WRITE_COMBINE
bool

#
# Select if the architecture provides the arch_dma_mark_clean hook
#
config ARCH_HAS_DMA_MARK_CLEAN
bool

config DMA_DECLARE_COHERENT
bool

config ARCH_HAS_SETUP_DMA_OPS
bool

config ARCH_HAS_TEARDOWN_DMA_OPS
	bool

config ARCH_HAS_SYNC_DMA_FOR_DEVICE
	bool

config ARCH_HAS_SYNC_DMA_FOR_CPU
bool
select NEED_DMA_MAP_STATE

config ARCH_HAS_SYNC_DMA_FOR_CPU_ALL
bool

config ARCH_HAS_DMA_PREP_COHERENT
bool

config ARCH_HAS_FORCE_DMA_UNENCRYPTED
bool

#
# Select this option if the architecture assumes DMA devices are coherent
# by default.
#
config ARCH_DMA_DEFAULT_COHERENT
bool

config SWIOTLB
bool
select NEED_DMA_MAP_STATE

config SWIOTLB_DYNAMIC
bool "Dynamic allocation of DMA bounce buffers"
default n
depends on SWIOTLB
help
This enables dynamic resizing of the software IO TLB. The kernel
	  starts with one memory pool at boot and allocates additional
pools as needed. To reduce run-time kernel memory requirements, you
may have to specify a smaller size of the initial pool using
"swiotlb=" on the kernel command line.

	  If unsure, say N.
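
# For illustration: "swiotlb=1024" on the kernel command line requests an
# initial pool of 1024 IO TLB slabs (2 KiB each, i.e. roughly 2 MiB); the
# exact syntax is in Documentation/admin-guide/kernel-parameters.txt.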

config DMA_BOUNCE_UNALIGNED_KMALLOC
bool
depends on SWIOTLB

config DMA_RESTRICTED_POOL
bool "DMA Restricted Pool"
depends on OF && OF_RESERVED_MEM && SWIOTLB
help
This enables support for restricted DMA pools which provide a level of
DMA memory protection on systems with limited hardware protection
capabilities, such as those lacking an IOMMU.

	  For more information see
	  <Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt>
	  and <kernel/dma/swiotlb.c>.

	  If unsure, say "n".

#
# Should be selected if we can mmap non-coherent mappings to userspace.
# The only thing that is really required is a way to set an uncached bit
# in the pagetables
#
config DMA_NONCOHERENT_MMAP
	default y if !MMU
	bool

config DMA_COHERENT_POOL
	select GENERIC_ALLOCATOR
	bool

config DMA_GLOBAL_POOL
select DMA_DECLARE_COHERENT
depends on !ARCH_HAS_DMA_SET_UNCACHED
depends on !DMA_DIRECT_REMAP
bool

config DMA_DIRECT_REMAP
bool
select DMA_COHERENT_POOL
select DMA_NONCOHERENT_MMAP

#
# Fallback to arch code for DMA allocations. This should eventually go away.
#
config ARCH_HAS_DMA_ALLOC
depends on !ARCH_HAS_DMA_SET_UNCACHED
depends on !DMA_DIRECT_REMAP
depends on !DMA_GLOBAL_POOL
bool

config DMA_CMA
bool "DMA Contiguous Memory Allocator"
depends on HAVE_DMA_CONTIGUOUS && CMA
help
This enables the Contiguous Memory Allocator which allows drivers
to allocate big physically-contiguous blocks of memory for use with
	  hardware components that support neither I/O mapping nor scatter-gather.

	  You can disable CMA by specifying "cma=0" on the kernel's command
line.

	  For more information see <kernel/dma/contiguous.c>.

	  If unsure, say "n".
if DMA_CMA

config DMA_NUMA_CMA
bool "Enable separate DMA Contiguous Memory Area for NUMA Node"
	depends on NUMA
	help
	  Enable this option to create per-NUMA-node CMA areas so that
	  NUMA-aware devices can allocate node-local memory through the
	  DMA coherent APIs.

	  You can set the size of the per-node CMA areas by specifying
	  "cma_pernuma=size", or set a node and its CMA size with
	  "numa_cma=<node>:size[,<node>:size]" on the kernel's command line.
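
# For illustration (sizes are arbitrary examples): "cma_pernuma=16M" gives
# every online node a 16 MiB CMA area, while "numa_cma=0:16M,1:32M" sizes
# the areas of nodes 0 and 1 individually.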

comment "Default contiguous memory area size:"

config CMA_SIZE_MBYTES
int "Size in Mega Bytes"
depends on !CMA_SIZE_SEL_PERCENTAGE
default 0 if X86
default 16
help
Defines the size (in MiB) of the default memory area for Contiguous
	  Memory Allocator. If a size of 0 is selected, CMA is disabled by
default, but it can be enabled by passing cma=size[MG] to the kernel.

config CMA_SIZE_PERCENTAGE
int "Percentage of total memory"
depends on !CMA_SIZE_SEL_MBYTES
default 0 if X86
default 10
help
Defines the size of the default memory area for Contiguous Memory
Allocator as a percentage of the total memory in the system.
If 0 percent is selected, CMA is disabled by default, but it can be
enabled by passing cma=size[MG] to the kernel.

choice
prompt "Selected region size"
default CMA_SIZE_SEL_MBYTES

config CMA_SIZE_SEL_MBYTES
	bool "Use megabytes value only"

config CMA_SIZE_SEL_PERCENTAGE
bool "Use percentage value only"

config CMA_SIZE_SEL_MIN
bool "Use lower value (minimum)"

config CMA_SIZE_SEL_MAX
bool "Use higher value (maximum)"
endchoice

config CMA_ALIGNMENT
int "Maximum PAGE_SIZE order of alignment for contiguous buffers"
	range 2 12
	default 8
help
DMA mapping framework by default aligns all buffers to the smallest
PAGE_SIZE order which is greater than or equal to the requested buffer
	  size. This works well for buffers up to a few hundred kilobytes, but
	  for larger buffers it is just a waste of memory. With this parameter you can
specify the maximum PAGE_SIZE order for contiguous buffers. Larger
	  buffers will be aligned only to this specified order. The resulting
	  alignment is PAGE_SIZE multiplied by two to the power of the order.
For example, if your system defaults to 4KiB pages, the order value
of 8 means that the buffers will be aligned up to 1MiB only.

	  If unsure, leave the default value "8".
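
# Worked example (assuming 4 KiB pages): the alignment is PAGE_SIZE << order,
# so the maximum order 12 corresponds to 16 MiB. With the default order 8, a
# 4 MiB buffer is therefore aligned to 1 MiB rather than to its full size.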
endif

config DMA_API_DEBUG
bool "Enable debugging of DMA-API usage"
select NEED_DMA_MAP_STATE
help
	  Enable this option to debug the use of the DMA API by device drivers.
	  With this option you will be able to detect common bugs in device
	  drivers like double-freeing of DMA mappings or freeing mappings that
	  were never allocated.

	  This option causes a performance degradation. Use only if you want to
	  debug device drivers and DMA interactions.

	  If unsure, say N.
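
# As an illustration (hypothetical driver code, not from this file), this is
# the kind of mismatch DMA_API_DEBUG reports at runtime:
#	dma_addr_t d = dma_map_single(dev, buf, 64, DMA_TO_DEVICE);
#	dma_unmap_single(dev, d, 128, DMA_TO_DEVICE);	/* wrong size: warned */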

config DMA_API_DEBUG_SG
bool "Debug DMA scatter-gather usage"
default y
depends on DMA_API_DEBUG
help
Perform extra checking that callers of dma_map_sg() have respected the
appropriate segment length/boundary limits for the given device when
preparing DMA scatterlists.

	  This is particularly likely to have been overlooked in cases where the
dma_map_sg() API is used for general bulk mapping of pages rather than
preparing literal scatter-gather descriptors, where there is a risk of
unexpected behaviour from DMA API implementations if the scatterlist
is technically out-of-spec.

	  If unsure, say N.
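
# Sketch of the limits being checked (hypothetical driver code): a device
# should declare its segment limits before mapping, and this option warns
# when a mapped scatterlist exceeds them:
#	dma_set_max_seg_size(dev, SZ_64K);
#	nents = dma_map_sg(dev, sgl, count, DMA_TO_DEVICE);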

config DMA_MAP_BENCHMARK
bool "Enable benchmarking of streaming DMA mapping"
depends on DEBUG_FS
help
	  Provides /sys/kernel/debug/dma_map_benchmark that helps with testing
	  the performance of dma_(un)map_page.

	  See tools/testing/selftests/dma/dma_map_benchmark.c
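
# For illustration: the benchmark is driven through debugfs; running the
# selftest binary built from the file above with its default options times
# repeated dma_map_page()/dma_unmap_page() calls and reports the average
# map/unmap latency (details are in the selftest source, not this file).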