Merge branch 'for-linus' of git://git.linaro.org/people/mszyprowski/linux-dma-mapping
Pull DMA mapping branch from Marek Szyprowski:
"Short summary for the whole series:
A few limitations have been identified in the current dma-mapping
design and its implementations for various architectures. There is
more than one function for allocating and freeing buffers: currently
three are in use: dma_{alloc,free}_coherent,
dma_{alloc,free}_writecombine and dma_{alloc,free}_noncoherent.
On most systems these calls are almost equivalent and can be
interchanged. On others, especially the truly non-coherent ones
(like ARM), the difference is easily noticed in overall driver
performance. Sadly, not all architectures provide implementations for
all of them, so drivers may need to be adapted and cannot easily be
shared between different architectures. The provided patches
unify all these functions and hide the differences behind the already
existing DMA attributes concept. The thread with more references is
available here:
http://www.spinics.net/lists/linux-sh/msg09777.html
These patches are also a prerequisite for unifying the DMA-mapping
implementation on the ARM architecture with the common one provided by
the dma_map_ops structure and for extending it with IOMMU support. More
information is available in the following thread:
http://thread.gmane.org/gmane.linux.kernel.cross-arch/12819
More work on the dma-mapping framework is planned, especially in the
area of buffer sharing and managing shared mappings (together with
the recently introduced dma_buf interface: commit d15bd7ee44
"dma-buf: Introduce dma buffer sharing mechanism").
The patches in the current set introduce new alloc/free methods
(with support for memory attributes) in the dma_map_ops structure,
which will later replace the dma_alloc_coherent and
dma_alloc_writecombine functions."
People finally started piping up with support for merging this, so I'm
merging it as the last of the pending stuff from the merge window.
Looks like pohmelfs is going to wait for 3.5 and more external support
for merging.
* 'for-linus' of git://git.linaro.org/people/mszyprowski/linux-dma-mapping:
common: DMA-mapping: add NON-CONSISTENT attribute
common: DMA-mapping: add WRITE_COMBINE attribute
common: dma-mapping: introduce mmap method
common: dma-mapping: remove old alloc_coherent and free_coherent methods
Hexagon: adapt for dma_map_ops changes
Unicore32: adapt for dma_map_ops changes
Microblaze: adapt for dma_map_ops changes
SH: adapt for dma_map_ops changes
Alpha: adapt for dma_map_ops changes
SPARC: adapt for dma_map_ops changes
PowerPC: adapt for dma_map_ops changes
MIPS: adapt for dma_map_ops changes
X86 & IA64: adapt for dma_map_ops changes
common: dma-mapping: introduce generic alloc() and free() methods
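
For illustration, the driver-visible shape of the unified interface is
sketched below. This is a minimal, hypothetical helper (alloc_wc_buffer
is not part of this series); it shows how a write-combined buffer is now
requested through the same dma_alloc_attrs() path that serves plain
coherent allocations:

	#include <linux/dma-mapping.h>
	#include <linux/dma-attrs.h>

	/* Hypothetical driver helper: allocate a write-combined buffer.
	 * Platforms that do not implement DMA_ATTR_WRITE_COMBINE simply
	 * ignore the hint and return their default (coherent) memory. */
	static void *alloc_wc_buffer(struct device *dev, size_t size,
				     dma_addr_t *handle)
	{
		DEFINE_DMA_ATTRS(attrs);

		dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
		return dma_alloc_attrs(dev, size, handle, GFP_KERNEL, &attrs);
	}

dma_alloc_coherent() survives as a macro that passes NULL attributes, so
existing callers keep their coherent semantics unchanged.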
commit 58bca4a8fa
@@ -31,3 +31,21 @@ may be weakly ordered, that is that reads and writes may pass each other.
 Since it is optional for platforms to implement DMA_ATTR_WEAK_ORDERING,
 those that do not will simply ignore the attribute and exhibit default
 behavior.
+
+DMA_ATTR_WRITE_COMBINE
+----------------------
+
+DMA_ATTR_WRITE_COMBINE specifies that writes to the mapping may be
+buffered to improve performance.
+
+Since it is optional for platforms to implement DMA_ATTR_WRITE_COMBINE,
+those that do not will simply ignore the attribute and exhibit default
+behavior.
+
+DMA_ATTR_NON_CONSISTENT
+-----------------------
+
+DMA_ATTR_NON_CONSISTENT lets the platform to choose to return either
+consistent or non-consistent memory as it sees fit. By using this API,
+you are guaranteeing to the platform that you have all the correct and
+necessary sync points for this memory in the driver.
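
To make the sync-point obligation concrete, here is a hedged sketch of
how a driver might use DMA_ATTR_NON_CONSISTENT together with explicit
synchronization on a platform that provides dma_cache_sync() (the
buffer size and fill are illustrative):

	DEFINE_DMA_ATTRS(attrs);
	dma_addr_t handle;
	void *buf;

	dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
	buf = dma_alloc_attrs(dev, PAGE_SIZE, &handle, GFP_KERNEL, &attrs);
	if (!buf)
		return -ENOMEM;

	/* The platform may have handed back non-consistent memory, so
	 * the driver owns the sync point: flush CPU writes before the
	 * device reads the buffer. */
	memset(buf, 0, PAGE_SIZE);
	dma_cache_sync(dev, buf, PAGE_SIZE, DMA_TO_DEVICE);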
@@ -12,16 +12,22 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 
 #include <asm-generic/dma-mapping-common.h>
 
-static inline void *dma_alloc_coherent(struct device *dev, size_t size,
-				       dma_addr_t *dma_handle, gfp_t gfp)
+#define dma_alloc_coherent(d,s,h,f)	dma_alloc_attrs(d,s,h,f,NULL)
+
+static inline void *dma_alloc_attrs(struct device *dev, size_t size,
+				    dma_addr_t *dma_handle, gfp_t gfp,
+				    struct dma_attrs *attrs)
 {
-	return get_dma_ops(dev)->alloc_coherent(dev, size, dma_handle, gfp);
+	return get_dma_ops(dev)->alloc(dev, size, dma_handle, gfp, attrs);
 }
 
-static inline void dma_free_coherent(struct device *dev, size_t size,
-				     void *vaddr, dma_addr_t dma_handle)
+#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL)
+
+static inline void dma_free_attrs(struct device *dev, size_t size,
+				  void *vaddr, dma_addr_t dma_handle,
+				  struct dma_attrs *attrs)
 {
-	get_dma_ops(dev)->free_coherent(dev, size, vaddr, dma_handle);
+	get_dma_ops(dev)->free(dev, size, vaddr, dma_handle, attrs);
 }
 
 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
@@ -108,7 +108,8 @@ sys_pciconfig_write(unsigned long bus, unsigned long dfn,
 }
 
 static void *alpha_noop_alloc_coherent(struct device *dev, size_t size,
-				       dma_addr_t *dma_handle, gfp_t gfp)
+				       dma_addr_t *dma_handle, gfp_t gfp,
+				       struct dma_attrs *attrs)
 {
 	void *ret;
 
@@ -123,7 +124,8 @@ static void *alpha_noop_alloc_coherent(struct device *dev, size_t size,
 }
 
 static void alpha_noop_free_coherent(struct device *dev, size_t size,
-				     void *cpu_addr, dma_addr_t dma_addr)
+				     void *cpu_addr, dma_addr_t dma_addr,
+				     struct dma_attrs *attrs)
 {
 	free_pages((unsigned long)cpu_addr, get_order(size));
 }
@@ -174,8 +176,8 @@ static int alpha_noop_set_mask(struct device *dev, u64 mask)
 }
 
 struct dma_map_ops alpha_noop_ops = {
-	.alloc_coherent	= alpha_noop_alloc_coherent,
-	.free_coherent	= alpha_noop_free_coherent,
+	.alloc		= alpha_noop_alloc_coherent,
+	.free		= alpha_noop_free_coherent,
 	.map_page	= alpha_noop_map_page,
 	.map_sg		= alpha_noop_map_sg,
 	.mapping_error	= alpha_noop_mapping_error,
@@ -434,7 +434,8 @@ static void alpha_pci_unmap_page(struct device *dev, dma_addr_t dma_addr,
    else DMA_ADDRP is undefined. */
 
 static void *alpha_pci_alloc_coherent(struct device *dev, size_t size,
-				      dma_addr_t *dma_addrp, gfp_t gfp)
+				      dma_addr_t *dma_addrp, gfp_t gfp,
+				      struct dma_attrs *attrs)
 {
 	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
 	void *cpu_addr;
@@ -478,7 +479,8 @@ try_again:
    DMA_ADDR past this call are illegal. */
 
 static void alpha_pci_free_coherent(struct device *dev, size_t size,
-				    void *cpu_addr, dma_addr_t dma_addr)
+				    void *cpu_addr, dma_addr_t dma_addr,
+				    struct dma_attrs *attrs)
 {
 	struct pci_dev *pdev = alpha_gendev_to_pci(dev);
 	pci_unmap_single(pdev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
@@ -952,8 +954,8 @@ static int alpha_pci_set_mask(struct device *dev, u64 mask)
 }
 
 struct dma_map_ops alpha_pci_ops = {
-	.alloc_coherent	= alpha_pci_alloc_coherent,
-	.free_coherent	= alpha_pci_free_coherent,
+	.alloc		= alpha_pci_alloc_coherent,
+	.free		= alpha_pci_free_coherent,
 	.map_page	= alpha_pci_map_page,
 	.unmap_page	= alpha_pci_unmap_page,
 	.map_sg		= alpha_pci_map_sg,
@@ -71,29 +71,35 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 	return (dma_addr == bad_dma_address);
 }
 
-static inline void *dma_alloc_coherent(struct device *dev, size_t size,
-				       dma_addr_t *dma_handle, gfp_t flag)
+#define dma_alloc_coherent(d,s,h,f)	dma_alloc_attrs(d,s,h,f,NULL)
+
+static inline void *dma_alloc_attrs(struct device *dev, size_t size,
+				    dma_addr_t *dma_handle, gfp_t flag,
+				    struct dma_attrs *attrs)
 {
 	void *ret;
 	struct dma_map_ops *ops = get_dma_ops(dev);
 
 	BUG_ON(!dma_ops);
 
-	ret = ops->alloc_coherent(dev, size, dma_handle, flag);
+	ret = ops->alloc(dev, size, dma_handle, flag, attrs);
 
 	debug_dma_alloc_coherent(dev, size, *dma_handle, ret);
 
 	return ret;
 }
 
-static inline void dma_free_coherent(struct device *dev, size_t size,
-				     void *cpu_addr, dma_addr_t dma_handle)
+#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL)
+
+static inline void dma_free_attrs(struct device *dev, size_t size,
+				  void *cpu_addr, dma_addr_t dma_handle,
+				  struct dma_attrs *attrs)
 {
 	struct dma_map_ops *dma_ops = get_dma_ops(dev);
 
 	BUG_ON(!dma_ops);
 
-	dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
+	dma_ops->free(dev, size, cpu_addr, dma_handle, attrs);
 
 	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
 }
@@ -54,7 +54,8 @@ static struct gen_pool *coherent_pool;
 /* Allocates from a pool of uncached memory that was reserved at boot time */
 
 void *hexagon_dma_alloc_coherent(struct device *dev, size_t size,
-				 dma_addr_t *dma_addr, gfp_t flag)
+				 dma_addr_t *dma_addr, gfp_t flag,
+				 struct dma_attrs *attrs)
 {
 	void *ret;
 
@@ -81,7 +82,7 @@ void *hexagon_dma_alloc_coherent(struct device *dev, size_t size,
 }
 
 static void hexagon_free_coherent(struct device *dev, size_t size, void *vaddr,
-				  dma_addr_t dma_addr)
+				  dma_addr_t dma_addr, struct dma_attrs *attrs)
 {
 	gen_pool_free(coherent_pool, (unsigned long) vaddr, size);
 }
@@ -202,8 +203,8 @@ static void hexagon_sync_single_for_device(struct device *dev,
 }
 
 struct dma_map_ops hexagon_dma_ops = {
-	.alloc_coherent	= hexagon_dma_alloc_coherent,
-	.free_coherent	= hexagon_free_coherent,
+	.alloc		= hexagon_dma_alloc_coherent,
+	.free		= hexagon_free_coherent,
 	.map_sg		= hexagon_map_sg,
 	.map_page	= hexagon_map_page,
 	.sync_single_for_cpu = hexagon_sync_single_for_cpu,
@@ -1129,7 +1129,8 @@ void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
  * See Documentation/DMA-API-HOWTO.txt
  */
 static void *
-sba_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flags)
+sba_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
+		   gfp_t flags, struct dma_attrs *attrs)
 {
 	struct ioc *ioc;
 	void *addr;
@@ -1191,8 +1192,8 @@ sba_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp
  *
  * See Documentation/DMA-API-HOWTO.txt
  */
-static void sba_free_coherent (struct device *dev, size_t size, void *vaddr,
-			       dma_addr_t dma_handle)
+static void sba_free_coherent(struct device *dev, size_t size, void *vaddr,
+			      dma_addr_t dma_handle, struct dma_attrs *attrs)
 {
 	sba_unmap_single_attrs(dev, dma_handle, size, 0, NULL);
 	free_pages((unsigned long) vaddr, get_order(size));
@@ -2212,8 +2213,8 @@ sba_page_override(char *str)
 __setup("sbapagesize=",sba_page_override);
 
 struct dma_map_ops sba_dma_ops = {
-	.alloc_coherent	= sba_alloc_coherent,
-	.free_coherent	= sba_free_coherent,
+	.alloc		= sba_alloc_coherent,
+	.free		= sba_free_coherent,
 	.map_page	= sba_map_page,
 	.unmap_page	= sba_unmap_page,
 	.map_sg		= sba_map_sg_attrs,
@@ -23,23 +23,29 @@ extern void machvec_dma_sync_single(struct device *, dma_addr_t, size_t,
 extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int,
 				enum dma_data_direction);
 
-static inline void *dma_alloc_coherent(struct device *dev, size_t size,
-				       dma_addr_t *daddr, gfp_t gfp)
+#define dma_alloc_coherent(d,s,h,f)	dma_alloc_attrs(d,s,h,f,NULL)
+
+static inline void *dma_alloc_attrs(struct device *dev, size_t size,
+				    dma_addr_t *daddr, gfp_t gfp,
+				    struct dma_attrs *attrs)
 {
 	struct dma_map_ops *ops = platform_dma_get_ops(dev);
 	void *caddr;
 
-	caddr = ops->alloc_coherent(dev, size, daddr, gfp);
+	caddr = ops->alloc(dev, size, daddr, gfp, attrs);
 	debug_dma_alloc_coherent(dev, size, *daddr, caddr);
 	return caddr;
 }
 
-static inline void dma_free_coherent(struct device *dev, size_t size,
-				     void *caddr, dma_addr_t daddr)
+#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL)
+
+static inline void dma_free_attrs(struct device *dev, size_t size,
+				  void *caddr, dma_addr_t daddr,
+				  struct dma_attrs *attrs)
 {
 	struct dma_map_ops *ops = platform_dma_get_ops(dev);
 	debug_dma_free_coherent(dev, size, caddr, daddr);
-	ops->free_coherent(dev, size, caddr, daddr);
+	ops->free(dev, size, caddr, daddr, attrs);
 }
 
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
@@ -15,16 +15,24 @@ int swiotlb __read_mostly;
 EXPORT_SYMBOL(swiotlb);
 
 static void *ia64_swiotlb_alloc_coherent(struct device *dev, size_t size,
-					 dma_addr_t *dma_handle, gfp_t gfp)
+					 dma_addr_t *dma_handle, gfp_t gfp,
+					 struct dma_attrs *attrs)
 {
 	if (dev->coherent_dma_mask != DMA_BIT_MASK(64))
 		gfp |= GFP_DMA;
 	return swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
 }
 
+static void ia64_swiotlb_free_coherent(struct device *dev, size_t size,
+				       void *vaddr, dma_addr_t dma_addr,
+				       struct dma_attrs *attrs)
+{
+	swiotlb_free_coherent(dev, size, vaddr, dma_addr);
+}
+
 struct dma_map_ops swiotlb_dma_ops = {
-	.alloc_coherent = ia64_swiotlb_alloc_coherent,
-	.free_coherent = swiotlb_free_coherent,
+	.alloc = ia64_swiotlb_alloc_coherent,
+	.free = ia64_swiotlb_free_coherent,
 	.map_page = swiotlb_map_page,
 	.unmap_page = swiotlb_unmap_page,
 	.map_sg = swiotlb_map_sg_attrs,
@@ -76,7 +76,8 @@ EXPORT_SYMBOL(sn_dma_set_mask);
  * more information.
  */
 static void *sn_dma_alloc_coherent(struct device *dev, size_t size,
-				   dma_addr_t * dma_handle, gfp_t flags)
+				   dma_addr_t * dma_handle, gfp_t flags,
+				   struct dma_attrs *attrs)
 {
 	void *cpuaddr;
 	unsigned long phys_addr;
@@ -137,7 +138,7 @@ static void *sn_dma_alloc_coherent(struct device *dev, size_t size,
  * any associated IOMMU mappings.
 */
 static void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
-				 dma_addr_t dma_handle)
+				 dma_addr_t dma_handle, struct dma_attrs *attrs)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
@@ -466,8 +467,8 @@ int sn_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
 }
 
 static struct dma_map_ops sn_dma_ops = {
-	.alloc_coherent	= sn_dma_alloc_coherent,
-	.free_coherent	= sn_dma_free_coherent,
+	.alloc		= sn_dma_alloc_coherent,
+	.free		= sn_dma_free_coherent,
 	.map_page	= sn_dma_map_page,
 	.unmap_page	= sn_dma_unmap_page,
 	.map_sg		= sn_dma_map_sg,
@@ -123,28 +123,34 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
 
-static inline void *dma_alloc_coherent(struct device *dev, size_t size,
-				       dma_addr_t *dma_handle, gfp_t flag)
+#define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL)
+
+static inline void *dma_alloc_attrs(struct device *dev, size_t size,
+				    dma_addr_t *dma_handle, gfp_t flag,
+				    struct dma_attrs *attrs)
 {
 	struct dma_map_ops *ops = get_dma_ops(dev);
 	void *memory;
 
 	BUG_ON(!ops);
 
-	memory = ops->alloc_coherent(dev, size, dma_handle, flag);
+	memory = ops->alloc(dev, size, dma_handle, flag, attrs);
 
 	debug_dma_alloc_coherent(dev, size, *dma_handle, memory);
 	return memory;
 }
 
-static inline void dma_free_coherent(struct device *dev, size_t size,
-				     void *cpu_addr, dma_addr_t dma_handle)
+#define dma_free_coherent(d,s,c,h) dma_free_attrs(d, s, c, h, NULL)
+
+static inline void dma_free_attrs(struct device *dev, size_t size,
+				  void *cpu_addr, dma_addr_t dma_handle,
+				  struct dma_attrs *attrs)
 {
 	struct dma_map_ops *ops = get_dma_ops(dev);
 
 	BUG_ON(!ops);
 	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
-	ops->free_coherent(dev, size, cpu_addr, dma_handle);
+	ops->free(dev, size, cpu_addr, dma_handle, attrs);
 }
 
 static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
@@ -33,7 +33,8 @@ static unsigned long get_dma_direct_offset(struct device *dev)
 #define NOT_COHERENT_CACHE
 
 static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
-				       dma_addr_t *dma_handle, gfp_t flag)
+				       dma_addr_t *dma_handle, gfp_t flag,
+				       struct dma_attrs *attrs)
 {
 #ifdef NOT_COHERENT_CACHE
 	return consistent_alloc(flag, size, dma_handle);
@@ -57,7 +58,8 @@ static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
 }
 
 static void dma_direct_free_coherent(struct device *dev, size_t size,
-				     void *vaddr, dma_addr_t dma_handle)
+				     void *vaddr, dma_addr_t dma_handle,
+				     struct dma_attrs *attrs)
 {
 #ifdef NOT_COHERENT_CACHE
 	consistent_free(size, vaddr);
@@ -176,8 +178,8 @@ dma_direct_sync_sg_for_device(struct device *dev,
 }
 
 struct dma_map_ops dma_direct_ops = {
-	.alloc_coherent	= dma_direct_alloc_coherent,
-	.free_coherent	= dma_direct_free_coherent,
+	.alloc		= dma_direct_alloc_coherent,
+	.free		= dma_direct_free_coherent,
 	.map_sg		= dma_direct_map_sg,
 	.unmap_sg	= dma_direct_unmap_sg,
 	.dma_supported	= dma_direct_dma_supported,
@@ -157,7 +157,7 @@ static void octeon_dma_sync_sg_for_device(struct device *dev,
 }
 
 static void *octeon_dma_alloc_coherent(struct device *dev, size_t size,
-	dma_addr_t *dma_handle, gfp_t gfp)
+	dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
 {
 	void *ret;
 
@@ -192,7 +192,7 @@ static void *octeon_dma_alloc_coherent(struct device *dev, size_t size,
 }
 
 static void octeon_dma_free_coherent(struct device *dev, size_t size,
-	void *vaddr, dma_addr_t dma_handle)
+	void *vaddr, dma_addr_t dma_handle, struct dma_attrs *attrs)
 {
 	int order = get_order(size);
 
@@ -240,8 +240,8 @@ EXPORT_SYMBOL(dma_to_phys);
 
 static struct octeon_dma_map_ops octeon_linear_dma_map_ops = {
 	.dma_map_ops = {
-		.alloc_coherent = octeon_dma_alloc_coherent,
-		.free_coherent = octeon_dma_free_coherent,
+		.alloc = octeon_dma_alloc_coherent,
+		.free = octeon_dma_free_coherent,
 		.map_page = octeon_dma_map_page,
 		.unmap_page = swiotlb_unmap_page,
 		.map_sg = octeon_dma_map_sg,
@@ -325,8 +325,8 @@ void __init plat_swiotlb_setup(void)
 #ifdef CONFIG_PCI
 static struct octeon_dma_map_ops _octeon_pci_dma_map_ops = {
 	.dma_map_ops = {
-		.alloc_coherent = octeon_dma_alloc_coherent,
-		.free_coherent = octeon_dma_free_coherent,
+		.alloc = octeon_dma_alloc_coherent,
+		.free = octeon_dma_free_coherent,
 		.map_page = octeon_dma_map_page,
 		.unmap_page = swiotlb_unmap_page,
 		.map_sg = octeon_dma_map_sg,
@@ -57,25 +57,31 @@ dma_set_mask(struct device *dev, u64 mask)
 extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 	       enum dma_data_direction direction);
 
-static inline void *dma_alloc_coherent(struct device *dev, size_t size,
-				       dma_addr_t *dma_handle, gfp_t gfp)
+#define dma_alloc_coherent(d,s,h,f)	dma_alloc_attrs(d,s,h,f,NULL)
+
+static inline void *dma_alloc_attrs(struct device *dev, size_t size,
+				    dma_addr_t *dma_handle, gfp_t gfp,
+				    struct dma_attrs *attrs)
 {
 	void *ret;
 	struct dma_map_ops *ops = get_dma_ops(dev);
 
-	ret = ops->alloc_coherent(dev, size, dma_handle, gfp);
+	ret = ops->alloc(dev, size, dma_handle, gfp, attrs);
 
 	debug_dma_alloc_coherent(dev, size, *dma_handle, ret);
 
 	return ret;
 }
 
-static inline void dma_free_coherent(struct device *dev, size_t size,
-				     void *vaddr, dma_addr_t dma_handle)
+#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL)
+
+static inline void dma_free_attrs(struct device *dev, size_t size,
+				  void *vaddr, dma_addr_t dma_handle,
+				  struct dma_attrs *attrs)
 {
 	struct dma_map_ops *ops = get_dma_ops(dev);
 
-	ops->free_coherent(dev, size, vaddr, dma_handle);
+	ops->free(dev, size, vaddr, dma_handle, attrs);
 
 	debug_dma_free_coherent(dev, size, vaddr, dma_handle);
 }
@@ -98,7 +98,7 @@ void *dma_alloc_noncoherent(struct device *dev, size_t size,
 EXPORT_SYMBOL(dma_alloc_noncoherent);
 
 static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
-	dma_addr_t * dma_handle, gfp_t gfp)
+	dma_addr_t * dma_handle, gfp_t gfp, struct dma_attrs *attrs)
 {
 	void *ret;
 
@@ -132,7 +132,7 @@ void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
 EXPORT_SYMBOL(dma_free_noncoherent);
 
 static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
-	dma_addr_t dma_handle)
+	dma_addr_t dma_handle, struct dma_attrs *attrs)
 {
 	unsigned long addr = (unsigned long) vaddr;
 	int order = get_order(size);
@@ -323,8 +323,8 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 EXPORT_SYMBOL(dma_cache_sync);
 
 static struct dma_map_ops mips_default_dma_map_ops = {
-	.alloc_coherent = mips_dma_alloc_coherent,
-	.free_coherent = mips_dma_free_coherent,
+	.alloc = mips_dma_alloc_coherent,
+	.free = mips_dma_free_coherent,
 	.map_page = mips_dma_map_page,
 	.unmap_page = mips_dma_unmap_page,
 	.map_sg = mips_dma_map_sg,
@@ -22,9 +22,11 @@
 
 /* Some dma direct funcs must be visible for use in other dma_ops */
 extern void *dma_direct_alloc_coherent(struct device *dev, size_t size,
-				       dma_addr_t *dma_handle, gfp_t flag);
+				       dma_addr_t *dma_handle, gfp_t flag,
+				       struct dma_attrs *attrs);
 extern void dma_direct_free_coherent(struct device *dev, size_t size,
-				     void *vaddr, dma_addr_t dma_handle);
+				     void *vaddr, dma_addr_t dma_handle,
+				     struct dma_attrs *attrs);
 
 
 #ifdef CONFIG_NOT_COHERENT_CACHE
@@ -130,23 +132,29 @@ static inline int dma_supported(struct device *dev, u64 mask)
 
 extern int dma_set_mask(struct device *dev, u64 dma_mask);
 
-static inline void *dma_alloc_coherent(struct device *dev, size_t size,
-				       dma_addr_t *dma_handle, gfp_t flag)
+#define dma_alloc_coherent(d,s,h,f)	dma_alloc_attrs(d,s,h,f,NULL)
+
+static inline void *dma_alloc_attrs(struct device *dev, size_t size,
+				    dma_addr_t *dma_handle, gfp_t flag,
+				    struct dma_attrs *attrs)
 {
 	struct dma_map_ops *dma_ops = get_dma_ops(dev);
 	void *cpu_addr;
 
 	BUG_ON(!dma_ops);
 
-	cpu_addr = dma_ops->alloc_coherent(dev, size, dma_handle, flag);
+	cpu_addr = dma_ops->alloc(dev, size, dma_handle, flag, attrs);
 
 	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
 
 	return cpu_addr;
 }
 
-static inline void dma_free_coherent(struct device *dev, size_t size,
-				     void *cpu_addr, dma_addr_t dma_handle)
+#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL)
+
+static inline void dma_free_attrs(struct device *dev, size_t size,
+				  void *cpu_addr, dma_addr_t dma_handle,
+				  struct dma_attrs *attrs)
 {
 	struct dma_map_ops *dma_ops = get_dma_ops(dev);
 
@@ -154,7 +162,7 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
 
 	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
 
-	dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
+	dma_ops->free(dev, size, cpu_addr, dma_handle, attrs);
 }
 
 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
@@ -17,7 +17,8 @@
  * to the dma address (mapping) of the first page.
  */
 static void *dma_iommu_alloc_coherent(struct device *dev, size_t size,
-				      dma_addr_t *dma_handle, gfp_t flag)
+				      dma_addr_t *dma_handle, gfp_t flag,
+				      struct dma_attrs *attrs)
 {
 	return iommu_alloc_coherent(dev, get_iommu_table_base(dev), size,
 				    dma_handle, dev->coherent_dma_mask, flag,
@@ -25,7 +26,8 @@ static void *dma_iommu_alloc_coherent(struct device *dev, size_t size,
 }
 
 static void dma_iommu_free_coherent(struct device *dev, size_t size,
-				    void *vaddr, dma_addr_t dma_handle)
+				    void *vaddr, dma_addr_t dma_handle,
+				    struct dma_attrs *attrs)
 {
 	iommu_free_coherent(get_iommu_table_base(dev), size, vaddr, dma_handle);
 }
@@ -105,8 +107,8 @@ static u64 dma_iommu_get_required_mask(struct device *dev)
 }
 
 struct dma_map_ops dma_iommu_ops = {
-	.alloc_coherent	= dma_iommu_alloc_coherent,
-	.free_coherent	= dma_iommu_free_coherent,
+	.alloc		= dma_iommu_alloc_coherent,
+	.free		= dma_iommu_free_coherent,
 	.map_sg		= dma_iommu_map_sg,
 	.unmap_sg	= dma_iommu_unmap_sg,
 	.dma_supported	= dma_iommu_dma_supported,
@@ -47,8 +47,8 @@ static u64 swiotlb_powerpc_get_required(struct device *dev)
 * for everything else.
 */
 struct dma_map_ops swiotlb_dma_ops = {
-	.alloc_coherent = dma_direct_alloc_coherent,
-	.free_coherent = dma_direct_free_coherent,
+	.alloc = dma_direct_alloc_coherent,
+	.free = dma_direct_free_coherent,
 	.map_sg = swiotlb_map_sg_attrs,
 	.unmap_sg = swiotlb_unmap_sg_attrs,
 	.dma_supported = swiotlb_dma_supported,
@@ -26,7 +26,8 @@
 
 
 void *dma_direct_alloc_coherent(struct device *dev, size_t size,
-				dma_addr_t *dma_handle, gfp_t flag)
+				dma_addr_t *dma_handle, gfp_t flag,
+				struct dma_attrs *attrs)
 {
 	void *ret;
 #ifdef CONFIG_NOT_COHERENT_CACHE
@@ -54,7 +55,8 @@ void *dma_direct_alloc_coherent(struct device *dev, size_t size,
 }
 
 void dma_direct_free_coherent(struct device *dev, size_t size,
-			      void *vaddr, dma_addr_t dma_handle)
+			      void *vaddr, dma_addr_t dma_handle,
+			      struct dma_attrs *attrs)
 {
 #ifdef CONFIG_NOT_COHERENT_CACHE
 	__dma_free_coherent(size, vaddr);
@@ -150,8 +152,8 @@ static inline void dma_direct_sync_single(struct device *dev,
 #endif
 
 struct dma_map_ops dma_direct_ops = {
-	.alloc_coherent	= dma_direct_alloc_coherent,
-	.free_coherent	= dma_direct_free_coherent,
+	.alloc		= dma_direct_alloc_coherent,
+	.free		= dma_direct_free_coherent,
 	.map_sg		= dma_direct_map_sg,
 	.unmap_sg	= dma_direct_unmap_sg,
 	.dma_supported	= dma_direct_dma_supported,
@@ -65,7 +65,8 @@ static struct of_device_id __initdata ibmebus_matches[] = {
 static void *ibmebus_alloc_coherent(struct device *dev,
 				    size_t size,
 				    dma_addr_t *dma_handle,
-				    gfp_t flag)
+				    gfp_t flag,
+				    struct dma_attrs *attrs)
 {
 	void *mem;
 
@@ -77,7 +78,8 @@ static void *ibmebus_alloc_coherent(struct device *dev,
 
 static void ibmebus_free_coherent(struct device *dev,
 				  size_t size, void *vaddr,
-				  dma_addr_t dma_handle)
+				  dma_addr_t dma_handle,
+				  struct dma_attrs *attrs)
 {
 	kfree(vaddr);
 }
@@ -136,8 +138,8 @@ static u64 ibmebus_dma_get_required_mask(struct device *dev)
 }
 
 static struct dma_map_ops ibmebus_dma_ops = {
-	.alloc_coherent = ibmebus_alloc_coherent,
-	.free_coherent  = ibmebus_free_coherent,
+	.alloc          = ibmebus_alloc_coherent,
+	.free           = ibmebus_free_coherent,
 	.map_sg         = ibmebus_map_sg,
 	.unmap_sg       = ibmebus_unmap_sg,
 	.dma_supported  = ibmebus_dma_supported,
@@ -482,7 +482,8 @@ static void vio_cmo_balance(struct work_struct *work)
 }
 
 static void *vio_dma_iommu_alloc_coherent(struct device *dev, size_t size,
-					  dma_addr_t *dma_handle, gfp_t flag)
+					  dma_addr_t *dma_handle, gfp_t flag,
+					  struct dma_attrs *attrs)
 {
 	struct vio_dev *viodev = to_vio_dev(dev);
 	void *ret;
@@ -492,7 +493,7 @@ static void *vio_dma_iommu_alloc_coherent(struct device *dev, size_t size,
 		return NULL;
 	}
 
-	ret = dma_iommu_ops.alloc_coherent(dev, size, dma_handle, flag);
+	ret = dma_iommu_ops.alloc(dev, size, dma_handle, flag, attrs);
 	if (unlikely(ret == NULL)) {
 		vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE));
 		atomic_inc(&viodev->cmo.allocs_failed);
@@ -502,11 +503,12 @@ static void *vio_dma_iommu_alloc_coherent(struct device *dev, size_t size,
 }
 
 static void vio_dma_iommu_free_coherent(struct device *dev, size_t size,
-					void *vaddr, dma_addr_t dma_handle)
+					void *vaddr, dma_addr_t dma_handle,
+					struct dma_attrs *attrs)
 {
 	struct vio_dev *viodev = to_vio_dev(dev);
 
-	dma_iommu_ops.free_coherent(dev, size, vaddr, dma_handle);
+	dma_iommu_ops.free(dev, size, vaddr, dma_handle, attrs);
 
 	vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE));
 }
@@ -607,8 +609,8 @@ static u64 vio_dma_get_required_mask(struct device *dev)
 }
 
 struct dma_map_ops vio_dma_mapping_ops = {
-	.alloc_coherent = vio_dma_iommu_alloc_coherent,
-	.free_coherent  = vio_dma_iommu_free_coherent,
+	.alloc          = vio_dma_iommu_alloc_coherent,
+	.free           = vio_dma_iommu_free_coherent,
 	.map_sg         = vio_dma_iommu_map_sg,
 	.unmap_sg       = vio_dma_iommu_unmap_sg,
 	.map_page       = vio_dma_iommu_map_page,
@@ -564,7 +564,8 @@ static struct iommu_table *cell_get_iommu_table(struct device *dev)
 /* A coherent allocation implies strong ordering */
 
 static void *dma_fixed_alloc_coherent(struct device *dev, size_t size,
-				      dma_addr_t *dma_handle, gfp_t flag)
+				      dma_addr_t *dma_handle, gfp_t flag,
+				      struct dma_attrs *attrs)
 {
 	if (iommu_fixed_is_weak)
 		return iommu_alloc_coherent(dev, cell_get_iommu_table(dev),
@@ -572,18 +573,19 @@ static void *dma_fixed_alloc_coherent(struct device *dev, size_t size,
 					    device_to_mask(dev), flag,
 					    dev_to_node(dev));
 	else
-		return dma_direct_ops.alloc_coherent(dev, size, dma_handle,
-						     flag);
+		return dma_direct_ops.alloc(dev, size, dma_handle, flag,
+					    attrs);
 }
 
 static void dma_fixed_free_coherent(struct device *dev, size_t size,
-				    void *vaddr, dma_addr_t dma_handle)
+				    void *vaddr, dma_addr_t dma_handle,
+				    struct dma_attrs *attrs)
 {
 	if (iommu_fixed_is_weak)
 		iommu_free_coherent(cell_get_iommu_table(dev), size, vaddr,
 				    dma_handle);
 	else
-		dma_direct_ops.free_coherent(dev, size, vaddr, dma_handle);
+		dma_direct_ops.free(dev, size, vaddr, dma_handle, attrs);
 }
 
 static dma_addr_t dma_fixed_map_page(struct device *dev, struct page *page,
@@ -642,8 +644,8 @@ static int dma_fixed_dma_supported(struct device *dev, u64 mask)
 static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask);
 
 struct dma_map_ops dma_iommu_fixed_ops = {
-	.alloc_coherent = dma_fixed_alloc_coherent,
-	.free_coherent  = dma_fixed_free_coherent,
+	.alloc          = dma_fixed_alloc_coherent,
+	.free           = dma_fixed_free_coherent,
 	.map_sg         = dma_fixed_map_sg,
 	.unmap_sg       = dma_fixed_unmap_sg,
 	.dma_supported  = dma_fixed_dma_supported,
@@ -515,7 +515,8 @@ core_initcall(ps3_system_bus_init);
 * to the dma address (mapping) of the first page.
 */
 static void * ps3_alloc_coherent(struct device *_dev, size_t size,
-				 dma_addr_t *dma_handle, gfp_t flag)
+				 dma_addr_t *dma_handle, gfp_t flag,
+				 struct dma_attrs *attrs)
 {
 	int result;
 	struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
@@ -552,7 +553,7 @@ clean_none:
 }
 
 static void ps3_free_coherent(struct device *_dev, size_t size, void *vaddr,
-			      dma_addr_t dma_handle)
+			      dma_addr_t dma_handle, struct dma_attrs *attrs)
 {
 	struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
 
@@ -701,8 +702,8 @@ static u64 ps3_dma_get_required_mask(struct device *_dev)
 }
 
 static struct dma_map_ops ps3_sb_dma_ops = {
-	.alloc_coherent = ps3_alloc_coherent,
-	.free_coherent = ps3_free_coherent,
+	.alloc = ps3_alloc_coherent,
+	.free = ps3_free_coherent,
 	.map_sg = ps3_sb_map_sg,
 	.unmap_sg = ps3_sb_unmap_sg,
 	.dma_supported = ps3_dma_supported,
@@ -712,8 +713,8 @@ static struct dma_map_ops ps3_sb_dma_ops = {
 };
 
 static struct dma_map_ops ps3_ioc0_dma_ops = {
-	.alloc_coherent = ps3_alloc_coherent,
-	.free_coherent = ps3_free_coherent,
+	.alloc = ps3_alloc_coherent,
+	.free = ps3_free_coherent,
 	.map_sg = ps3_ioc0_map_sg,
 	.unmap_sg = ps3_ioc0_unmap_sg,
 	.dma_supported = ps3_dma_supported,
@@ -52,25 +52,31 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 	return dma_addr == 0;
 }
 
-static inline void *dma_alloc_coherent(struct device *dev, size_t size,
-				       dma_addr_t *dma_handle, gfp_t gfp)
+#define dma_alloc_coherent(d,s,h,f)	dma_alloc_attrs(d,s,h,f,NULL)
+
+static inline void *dma_alloc_attrs(struct device *dev, size_t size,
+				    dma_addr_t *dma_handle, gfp_t gfp,
+				    struct dma_attrs *attrs)
 {
 	struct dma_map_ops *ops = get_dma_ops(dev);
 	void *memory;
 
 	if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
 		return memory;
-	if (!ops->alloc_coherent)
+	if (!ops->alloc)
 		return NULL;
 
-	memory = ops->alloc_coherent(dev, size, dma_handle, gfp);
+	memory = ops->alloc(dev, size, dma_handle, gfp, attrs);
 	debug_dma_alloc_coherent(dev, size, *dma_handle, memory);
 
 	return memory;
 }
 
-static inline void dma_free_coherent(struct device *dev, size_t size,
-				     void *vaddr, dma_addr_t dma_handle)
+#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL)
+
+static inline void dma_free_attrs(struct device *dev, size_t size,
+				  void *vaddr, dma_addr_t dma_handle,
+				  struct dma_attrs *attrs)
 {
 	struct dma_map_ops *ops = get_dma_ops(dev);
 
@@ -78,14 +84,16 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
 		return;
 
 	debug_dma_free_coherent(dev, size, vaddr, dma_handle);
-	if (ops->free_coherent)
-		ops->free_coherent(dev, size, vaddr, dma_handle);
+	if (ops->free)
+		ops->free(dev, size, vaddr, dma_handle, attrs);
 }
 
 /* arch/sh/mm/consistent.c */
 extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
-					dma_addr_t *dma_addr, gfp_t flag);
+					dma_addr_t *dma_addr, gfp_t flag,
+					struct dma_attrs *attrs);
 extern void dma_generic_free_coherent(struct device *dev, size_t size,
-				      void *vaddr, dma_addr_t dma_handle);
+				      void *vaddr, dma_addr_t dma_handle,
+				      struct dma_attrs *attrs);
 
 #endif /* __ASM_SH_DMA_MAPPING_H */
@@ -63,8 +63,8 @@ static void nommu_sync_sg(struct device *dev, struct scatterlist *sg,
 #endif
 
 struct dma_map_ops nommu_dma_ops = {
-	.alloc_coherent	= dma_generic_alloc_coherent,
-	.free_coherent	= dma_generic_free_coherent,
+	.alloc		= dma_generic_alloc_coherent,
+	.free		= dma_generic_free_coherent,
 	.map_page	= nommu_map_page,
 	.map_sg		= nommu_map_sg,
 #ifdef CONFIG_DMA_NONCOHERENT
@@ -33,7 +33,8 @@ static int __init dma_init(void)
 fs_initcall(dma_init);
 
 void *dma_generic_alloc_coherent(struct device *dev, size_t size,
-				 dma_addr_t *dma_handle, gfp_t gfp)
+				 dma_addr_t *dma_handle, gfp_t gfp,
+				 struct dma_attrs *attrs)
 {
 	void *ret, *ret_nocache;
 	int order = get_order(size);
@@ -64,7 +65,8 @@ void *dma_generic_alloc_coherent(struct device *dev, size_t size,
 }
 
 void dma_generic_free_coherent(struct device *dev, size_t size,
-			       void *vaddr, dma_addr_t dma_handle)
+			       void *vaddr, dma_addr_t dma_handle,
+			       struct dma_attrs *attrs)
 {
 	int order = get_order(size);
 	unsigned long pfn = dma_handle >> PAGE_SHIFT;
@@ -26,24 +26,30 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 
 #include <asm-generic/dma-mapping-common.h>
 
-static inline void *dma_alloc_coherent(struct device *dev, size_t size,
-				       dma_addr_t *dma_handle, gfp_t flag)
+#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL)
+
+static inline void *dma_alloc_attrs(struct device *dev, size_t size,
+				    dma_addr_t *dma_handle, gfp_t flag,
+				    struct dma_attrs *attrs)
 {
 	struct dma_map_ops *ops = get_dma_ops(dev);
 	void *cpu_addr;
 
-	cpu_addr = ops->alloc_coherent(dev, size, dma_handle, flag);
+	cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
 	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
 	return cpu_addr;
 }
 
-static inline void dma_free_coherent(struct device *dev, size_t size,
-				     void *cpu_addr, dma_addr_t dma_handle)
+#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL)
+
+static inline void dma_free_attrs(struct device *dev, size_t size,
+				  void *cpu_addr, dma_addr_t dma_handle,
+				  struct dma_attrs *attrs)
 {
 	struct dma_map_ops *ops = get_dma_ops(dev);
 
 	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
-	ops->free_coherent(dev, size, cpu_addr, dma_handle);
+	ops->free(dev, size, cpu_addr, dma_handle, attrs);
 }
 
 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
@@ -280,7 +280,8 @@ static inline void iommu_free_ctx(struct iommu *iommu, int ctx)
 }
 
 static void *dma_4u_alloc_coherent(struct device *dev, size_t size,
-				   dma_addr_t *dma_addrp, gfp_t gfp)
+				   dma_addr_t *dma_addrp, gfp_t gfp,
+				   struct dma_attrs *attrs)
 {
 	unsigned long flags, order, first_page;
 	struct iommu *iommu;
@@ -330,7 +331,8 @@ static void *dma_4u_alloc_coherent(struct device *dev, size_t size,
 }
 
 static void dma_4u_free_coherent(struct device *dev, size_t size,
-				 void *cpu, dma_addr_t dvma)
+				 void *cpu, dma_addr_t dvma,
+				 struct dma_attrs *attrs)
 {
 	struct iommu *iommu;
 	unsigned long flags, order, npages;
@@ -825,8 +827,8 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev,
 }
 
 static struct dma_map_ops sun4u_dma_ops = {
-	.alloc_coherent	= dma_4u_alloc_coherent,
-	.free_coherent	= dma_4u_free_coherent,
+	.alloc		= dma_4u_alloc_coherent,
+	.free		= dma_4u_free_coherent,
 	.map_page	= dma_4u_map_page,
 	.unmap_page	= dma_4u_unmap_page,
 	.map_sg		= dma_4u_map_sg,
@@ -261,7 +261,8 @@ EXPORT_SYMBOL(sbus_set_sbus64);
 * CPU may access them without any explicit flushing.
 */
 static void *sbus_alloc_coherent(struct device *dev, size_t len,
-				 dma_addr_t *dma_addrp, gfp_t gfp)
+				 dma_addr_t *dma_addrp, gfp_t gfp,
+				 struct dma_attrs *attrs)
 {
 	struct platform_device *op = to_platform_device(dev);
 	unsigned long len_total = PAGE_ALIGN(len);
@@ -315,7 +316,7 @@ err_nopages:
 }
 
 static void sbus_free_coherent(struct device *dev, size_t n, void *p,
-			       dma_addr_t ba)
+			       dma_addr_t ba, struct dma_attrs *attrs)
 {
 	struct resource *res;
 	struct page *pgv;
@@ -407,8 +408,8 @@ static void sbus_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 }
 
 struct dma_map_ops sbus_dma_ops = {
-	.alloc_coherent	= sbus_alloc_coherent,
-	.free_coherent	= sbus_free_coherent,
+	.alloc		= sbus_alloc_coherent,
+	.free		= sbus_free_coherent,
 	.map_page	= sbus_map_page,
 	.unmap_page	= sbus_unmap_page,
 	.map_sg		= sbus_map_sg,
@@ -436,7 +437,8 @@ arch_initcall(sparc_register_ioport);
 * hwdev should be valid struct pci_dev pointer for PCI devices.
 */
 static void *pci32_alloc_coherent(struct device *dev, size_t len,
-				  dma_addr_t *pba, gfp_t gfp)
+				  dma_addr_t *pba, gfp_t gfp,
+				  struct dma_attrs *attrs)
 {
 	unsigned long len_total = PAGE_ALIGN(len);
 	void *va;
@@ -489,7 +491,7 @@ err_nopages:
 * past this call are illegal.
 */
 static void pci32_free_coherent(struct device *dev, size_t n, void *p,
-				dma_addr_t ba)
+				dma_addr_t ba, struct dma_attrs *attrs)
 {
 	struct resource *res;
 
@@ -645,8 +647,8 @@ static void pci32_sync_sg_for_device(struct device *device, struct scatterlist *
 }
 
 struct dma_map_ops pci32_dma_ops = {
-	.alloc_coherent	= pci32_alloc_coherent,
-	.free_coherent	= pci32_free_coherent,
+	.alloc		= pci32_alloc_coherent,
+	.free		= pci32_free_coherent,
 	.map_page	= pci32_map_page,
 	.unmap_page	= pci32_unmap_page,
 	.map_sg		= pci32_map_sg,
@@ -128,7 +128,8 @@ static inline long iommu_batch_end(void)
 }
 
 static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
-				   dma_addr_t *dma_addrp, gfp_t gfp)
+				   dma_addr_t *dma_addrp, gfp_t gfp,
+				   struct dma_attrs *attrs)
 {
 	unsigned long flags, order, first_page, npages, n;
 	struct iommu *iommu;
@@ -198,7 +199,7 @@ range_alloc_fail:
 }
 
 static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
-				 dma_addr_t dvma)
+				 dma_addr_t dvma, struct dma_attrs *attrs)
 {
 	struct pci_pbm_info *pbm;
 	struct iommu *iommu;
@@ -527,8 +528,8 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
 }
 
 static struct dma_map_ops sun4v_dma_ops = {
-	.alloc_coherent	= dma_4v_alloc_coherent,
-	.free_coherent	= dma_4v_free_coherent,
+	.alloc		= dma_4v_alloc_coherent,
+	.free		= dma_4v_free_coherent,
 	.map_page	= dma_4v_map_page,
 	.unmap_page	= dma_4v_unmap_page,
 	.map_sg		= dma_4v_map_sg,
@@ -82,20 +82,26 @@ static inline int dma_set_mask(struct device *dev, u64 dma_mask)
 	return 0;
 }
 
-static inline void *dma_alloc_coherent(struct device *dev, size_t size,
-				       dma_addr_t *dma_handle, gfp_t flag)
+#define dma_alloc_coherent(d,s,h,f)	dma_alloc_attrs(d,s,h,f,NULL)
+
+static inline void *dma_alloc_attrs(struct device *dev, size_t size,
+				    dma_addr_t *dma_handle, gfp_t flag,
+				    struct dma_attrs *attrs)
 {
 	struct dma_map_ops *dma_ops = get_dma_ops(dev);
 
-	return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
+	return dma_ops->alloc(dev, size, dma_handle, flag, attrs);
 }
 
-static inline void dma_free_coherent(struct device *dev, size_t size,
-				     void *cpu_addr, dma_addr_t dma_handle)
+#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL)
+
+static inline void dma_free_attrs(struct device *dev, size_t size,
+				  void *cpu_addr, dma_addr_t dma_handle,
+				  struct dma_attrs *attrs)
 {
 	struct dma_map_ops *dma_ops = get_dma_ops(dev);
 
-	dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
+	dma_ops->free(dev, size, cpu_addr, dma_handle, attrs);
 }
 
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
@@ -17,9 +17,23 @@
 
 #include <asm/dma.h>
 
+static void *unicore_swiotlb_alloc_coherent(struct device *dev, size_t size,
+					    dma_addr_t *dma_handle, gfp_t flags,
+					    struct dma_attrs *attrs)
+{
+	return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
+}
+
+static void unicore_swiotlb_free_coherent(struct device *dev, size_t size,
+					  void *vaddr, dma_addr_t dma_addr,
+					  struct dma_attrs *attrs)
+{
+	swiotlb_free_coherent(dev, size, vaddr, dma_addr);
+}
+
 struct dma_map_ops swiotlb_dma_map_ops = {
-	.alloc_coherent = swiotlb_alloc_coherent,
-	.free_coherent = swiotlb_free_coherent,
+	.alloc = unicore_swiotlb_alloc_coherent,
+	.free = unicore_swiotlb_free_coherent,
 	.map_sg = swiotlb_map_sg_attrs,
 	.unmap_sg = swiotlb_unmap_sg_attrs,
 	.dma_supported = swiotlb_dma_supported,
@@ -59,7 +59,8 @@ extern int dma_supported(struct device *hwdev, u64 mask);
 extern int dma_set_mask(struct device *dev, u64 mask);
 
 extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
-					dma_addr_t *dma_addr, gfp_t flag);
+					dma_addr_t *dma_addr, gfp_t flag,
+					struct dma_attrs *attrs);
 
 static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 {
@@ -111,9 +112,11 @@ static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
 	return gfp;
 }
 
+#define dma_alloc_coherent(d,s,h,f)	dma_alloc_attrs(d,s,h,f,NULL)
+
 static inline void *
-dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
-		gfp_t gfp)
+dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
+		gfp_t gfp, struct dma_attrs *attrs)
 {
 	struct dma_map_ops *ops = get_dma_ops(dev);
 	void *memory;
@@ -129,18 +132,21 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
 	if (!is_device_dma_capable(dev))
 		return NULL;
 
-	if (!ops->alloc_coherent)
+	if (!ops->alloc)
 		return NULL;
 
-	memory = ops->alloc_coherent(dev, size, dma_handle,
-				     dma_alloc_coherent_gfp_flags(dev, gfp));
+	memory = ops->alloc(dev, size, dma_handle,
+			    dma_alloc_coherent_gfp_flags(dev, gfp), attrs);
 	debug_dma_alloc_coherent(dev, size, *dma_handle, memory);
 
 	return memory;
 }
 
-static inline void dma_free_coherent(struct device *dev, size_t size,
-				     void *vaddr, dma_addr_t bus)
+#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL)
+
+static inline void dma_free_attrs(struct device *dev, size_t size,
+				  void *vaddr, dma_addr_t bus,
+				  struct dma_attrs *attrs)
 {
 	struct dma_map_ops *ops = get_dma_ops(dev);
 
@@ -150,8 +156,8 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
 		return;
 
 	debug_dma_free_coherent(dev, size, vaddr, bus);
-	if (ops->free_coherent)
-		ops->free_coherent(dev, size, vaddr, bus);
+	if (ops->free)
+		ops->free(dev, size, vaddr, bus, attrs);
 }
 
 #endif
@@ -477,7 +477,7 @@ error:
 /* allocate and map a coherent mapping */
 static void *
 gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
-		    gfp_t flag)
+		    gfp_t flag, struct dma_attrs *attrs)
 {
 	dma_addr_t paddr;
 	unsigned long align_mask;
@@ -500,7 +500,8 @@ gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
 		}
 		__free_pages(page, get_order(size));
 	} else
-		return dma_generic_alloc_coherent(dev, size, dma_addr, flag);
+		return dma_generic_alloc_coherent(dev, size, dma_addr, flag,
+						  attrs);
 
 	return NULL;
 }
@@ -508,7 +509,7 @@ gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
 /* free a coherent mapping */
 static void
 gart_free_coherent(struct device *dev, size_t size, void *vaddr,
-		   dma_addr_t dma_addr)
+		   dma_addr_t dma_addr, struct dma_attrs *attrs)
 {
 	gart_unmap_page(dev, dma_addr, size, DMA_BIDIRECTIONAL, NULL);
 	free_pages((unsigned long)vaddr, get_order(size));
@@ -700,8 +701,8 @@ static struct dma_map_ops gart_dma_ops = {
 	.unmap_sg			= gart_unmap_sg,
 	.map_page			= gart_map_page,
 	.unmap_page			= gart_unmap_page,
-	.alloc_coherent			= gart_alloc_coherent,
-	.free_coherent			= gart_free_coherent,
+	.alloc				= gart_alloc_coherent,
+	.free				= gart_free_coherent,
 	.mapping_error			= gart_mapping_error,
 };
 
@@ -430,7 +430,7 @@ static void calgary_unmap_page(struct device *dev, dma_addr_t dma_addr,
 }
 
 static void* calgary_alloc_coherent(struct device *dev, size_t size,
-	dma_addr_t *dma_handle, gfp_t flag)
+	dma_addr_t *dma_handle, gfp_t flag, struct dma_attrs *attrs)
 {
 	void *ret = NULL;
 	dma_addr_t mapping;
@@ -463,7 +463,8 @@ error:
 }
 
 static void calgary_free_coherent(struct device *dev, size_t size,
-				  void *vaddr, dma_addr_t dma_handle)
+				  void *vaddr, dma_addr_t dma_handle,
+				  struct dma_attrs *attrs)
 {
 	unsigned int npages;
 	struct iommu_table *tbl = find_iommu_table(dev);
@@ -476,8 +477,8 @@ static void calgary_free_coherent(struct device *dev, size_t size,
 }
 
 static struct dma_map_ops calgary_dma_ops = {
-	.alloc_coherent = calgary_alloc_coherent,
-	.free_coherent = calgary_free_coherent,
+	.alloc = calgary_alloc_coherent,
+	.free = calgary_free_coherent,
 	.map_sg = calgary_map_sg,
 	.unmap_sg = calgary_unmap_sg,
 	.map_page = calgary_map_page,
@@ -96,7 +96,8 @@ void __init pci_iommu_alloc(void)
 	}
 }
 void *dma_generic_alloc_coherent(struct device *dev, size_t size,
-				 dma_addr_t *dma_addr, gfp_t flag)
+				 dma_addr_t *dma_addr, gfp_t flag,
+				 struct dma_attrs *attrs)
 {
 	unsigned long dma_mask;
 	struct page *page;
@@ -75,7 +75,7 @@ static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg,
 }
 
 static void nommu_free_coherent(struct device *dev, size_t size, void *vaddr,
-				dma_addr_t dma_addr)
+				dma_addr_t dma_addr, struct dma_attrs *attrs)
 {
 	free_pages((unsigned long)vaddr, get_order(size));
 }
@@ -96,8 +96,8 @@ static void nommu_sync_sg_for_device(struct device *dev,
 }
 
 struct dma_map_ops nommu_dma_ops = {
-	.alloc_coherent	= dma_generic_alloc_coherent,
-	.free_coherent	= nommu_free_coherent,
+	.alloc		= dma_generic_alloc_coherent,
+	.free		= nommu_free_coherent,
 	.map_sg		= nommu_map_sg,
 	.map_page	= nommu_map_page,
 	.sync_single_for_device = nommu_sync_single_for_device,
@@ -15,21 +15,30 @@
 int swiotlb __read_mostly;
 
 static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
-					dma_addr_t *dma_handle, gfp_t flags)
+					dma_addr_t *dma_handle, gfp_t flags,
+					struct dma_attrs *attrs)
 {
 	void *vaddr;
 
-	vaddr = dma_generic_alloc_coherent(hwdev, size, dma_handle, flags);
+	vaddr = dma_generic_alloc_coherent(hwdev, size, dma_handle, flags,
+					   attrs);
 	if (vaddr)
 		return vaddr;
 
 	return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
 }
 
+static void x86_swiotlb_free_coherent(struct device *dev, size_t size,
+				      void *vaddr, dma_addr_t dma_addr,
+				      struct dma_attrs *attrs)
+{
+	swiotlb_free_coherent(dev, size, vaddr, dma_addr);
+}
+
 static struct dma_map_ops swiotlb_dma_ops = {
 	.mapping_error = swiotlb_dma_mapping_error,
-	.alloc_coherent = x86_swiotlb_alloc_coherent,
-	.free_coherent = swiotlb_free_coherent,
+	.alloc = x86_swiotlb_alloc_coherent,
+	.free = x86_swiotlb_free_coherent,
 	.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
	.sync_single_for_device = swiotlb_sync_single_for_device,
 	.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
@@ -12,8 +12,8 @@ int xen_swiotlb __read_mostly;
 
 static struct dma_map_ops xen_swiotlb_dma_ops = {
 	.mapping_error = xen_swiotlb_dma_mapping_error,
-	.alloc_coherent = xen_swiotlb_alloc_coherent,
-	.free_coherent = xen_swiotlb_free_coherent,
+	.alloc = xen_swiotlb_alloc_coherent,
+	.free = xen_swiotlb_free_coherent,
 	.sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
 	.sync_single_for_device = xen_swiotlb_sync_single_for_device,
 	.sync_sg_for_cpu = xen_swiotlb_sync_sg_for_cpu,
@@ -2707,7 +2707,8 @@ static void unmap_sg(struct device *dev, struct scatterlist *sglist,
 * The exported alloc_coherent function for dma_ops.
 */
 static void *alloc_coherent(struct device *dev, size_t size,
-			    dma_addr_t *dma_addr, gfp_t flag)
+			    dma_addr_t *dma_addr, gfp_t flag,
+			    struct dma_attrs *attrs)
 {
 	unsigned long flags;
 	void *virt_addr;
@@ -2765,7 +2766,8 @@ out_free:
 * The exported free_coherent function for dma_ops.
 */
 static void free_coherent(struct device *dev, size_t size,
-			  void *virt_addr, dma_addr_t dma_addr)
+			  void *virt_addr, dma_addr_t dma_addr,
+			  struct dma_attrs *attrs)
 {
 	unsigned long flags;
 	struct protection_domain *domain;
@@ -2846,8 +2848,8 @@ static void __init prealloc_protection_domains(void)
 }
 
 static struct dma_map_ops amd_iommu_dma_ops = {
-	.alloc_coherent = alloc_coherent,
-	.free_coherent = free_coherent,
+	.alloc = alloc_coherent,
+	.free = free_coherent,
 	.map_page = map_page,
 	.unmap_page = unmap_page,
 	.map_sg = map_sg,
@@ -2949,7 +2949,8 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
 }
 
 static void *intel_alloc_coherent(struct device *hwdev, size_t size,
-				  dma_addr_t *dma_handle, gfp_t flags)
+				  dma_addr_t *dma_handle, gfp_t flags,
+				  struct dma_attrs *attrs)
 {
 	void *vaddr;
 	int order;
@@ -2981,7 +2982,7 @@ static void *intel_alloc_coherent(struct device *hwdev, size_t size,
 }
 
 static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
-				dma_addr_t dma_handle)
+				dma_addr_t dma_handle, struct dma_attrs *attrs)
 {
 	int order;
 
@@ -3126,8 +3127,8 @@ static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
 }
 
 struct dma_map_ops intel_dma_ops = {
-	.alloc_coherent = intel_alloc_coherent,
-	.free_coherent = intel_free_coherent,
+	.alloc = intel_alloc_coherent,
+	.free = intel_free_coherent,
 	.map_sg = intel_map_sg,
 	.unmap_sg = intel_unmap_sg,
 	.map_page = intel_map_page,
@@ -204,7 +204,8 @@ error:
 
 void *
 xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
-			   dma_addr_t *dma_handle, gfp_t flags)
+			   dma_addr_t *dma_handle, gfp_t flags,
+			   struct dma_attrs *attrs)
 {
 	void *ret;
 	int order = get_order(size);
@@ -253,7 +254,7 @@ EXPORT_SYMBOL_GPL(xen_swiotlb_alloc_coherent);
 
 void
 xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
-			  dma_addr_t dev_addr)
+			  dma_addr_t dev_addr, struct dma_attrs *attrs)
 {
 	int order = get_order(size);
 	phys_addr_t phys;
@@ -13,6 +13,8 @@
 enum dma_attr {
 	DMA_ATTR_WRITE_BARRIER,
 	DMA_ATTR_WEAK_ORDERING,
+	DMA_ATTR_WRITE_COMBINE,
+	DMA_ATTR_NON_CONSISTENT,
 	DMA_ATTR_MAX,
 };
 
@@ -9,10 +9,15 @@
 #include <linux/scatterlist.h>
 
 struct dma_map_ops {
-	void* (*alloc_coherent)(struct device *dev, size_t size,
-				dma_addr_t *dma_handle, gfp_t gfp);
-	void (*free_coherent)(struct device *dev, size_t size,
-			      void *vaddr, dma_addr_t dma_handle);
+	void* (*alloc)(struct device *dev, size_t size,
+				dma_addr_t *dma_handle, gfp_t gfp,
+				struct dma_attrs *attrs);
+	void (*free)(struct device *dev, size_t size,
+			      void *vaddr, dma_addr_t dma_handle,
+			      struct dma_attrs *attrs);
+	int (*mmap)(struct device *, struct vm_area_struct *,
+			  void *, dma_addr_t, size_t, struct dma_attrs *attrs);
 
 	dma_addr_t (*map_page)(struct device *dev, struct page *page,
 			       unsigned long offset, size_t size,
 			       enum dma_data_direction dir,
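
For a fully coherent platform, wiring the new hooks can be as small as
the sketch below (hypothetical foo_* names, not from this merge; such a
backend may ignore the attrs argument entirely, as several of the
conversions above do, because every attribute is optional):

	static void *foo_alloc(struct device *dev, size_t size,
			       dma_addr_t *dma_handle, gfp_t gfp,
			       struct dma_attrs *attrs)
	{
		/* attrs intentionally unused: coherent memory already
		 * satisfies the default semantics of every attribute
		 * this platform supports. */
		return foo_alloc_pages(dev, size, dma_handle, gfp);
	}

	static void foo_free(struct device *dev, size_t size, void *vaddr,
			     dma_addr_t dma_handle, struct dma_attrs *attrs)
	{
		foo_free_pages(dev, size, vaddr, dma_handle);
	}

	struct dma_map_ops foo_dma_ops = {
		.alloc	= foo_alloc,
		.free	= foo_free,
		/* ... map_page, map_sg, sync hooks, etc. ... */
	};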
@@ -7,11 +7,13 @@ extern void xen_swiotlb_init(int verbose);
 
 extern void
 *xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
-			    dma_addr_t *dma_handle, gfp_t flags);
+			    dma_addr_t *dma_handle, gfp_t flags,
+			    struct dma_attrs *attrs);
 
 extern void
 xen_swiotlb_free_coherent(struct device *hwdev, size_t size,
-			  void *vaddr, dma_addr_t dma_handle);
+			  void *vaddr, dma_addr_t dma_handle,
+			  struct dma_attrs *attrs);
 
 extern dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 				       unsigned long offset, size_t size,