// SPDX-License-Identifier: GPL-2.0
/*
 * DMABUF CMA heap exporter
 *
 * Copyright (C) 2012, 2019, 2020 Linaro Ltd.
 * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
 *
 * Also utilizing parts of Andrew Davis' SRAM heap:
 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
 *	Andrew F. Davis <afd@ti.com>
 */
#include <linux/cma.h>
#include <linux/dma-buf.h>
#include <linux/dma-heap.h>
#include <linux/dma-map-ops.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

struct cma_heap {
	struct dma_heap *heap;
	struct cma *cma;
};

struct cma_heap_buffer {
	struct cma_heap *heap;
	struct list_head attachments;
	struct mutex lock;
	unsigned long len;
	struct page *cma_pages;
	struct page **pages;
	pgoff_t pagecount;
	int vmap_cnt;
	void *vaddr;
};

struct dma_heap_attachment {
	struct device *dev;
	struct sg_table table;
	struct list_head list;
	/* only sync this attachment while it is actually mapped on a device */
	bool mapped;
};

static int cma_heap_attach(struct dma_buf *dmabuf,
			   struct dma_buf_attachment *attachment)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;
	int ret;

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	ret = sg_alloc_table_from_pages(&a->table, buffer->pages,
					buffer->pagecount, 0,
					buffer->pagecount << PAGE_SHIFT,
					GFP_KERNEL);
	if (ret) {
		kfree(a);
		return ret;
	}

	a->dev = attachment->dev;
	INIT_LIST_HEAD(&a->list);
	a->mapped = false;

	attachment->priv = a;

	mutex_lock(&buffer->lock);
	list_add(&a->list, &buffer->attachments);
	mutex_unlock(&buffer->lock);

	return 0;
}

static void cma_heap_detach(struct dma_buf *dmabuf,
			    struct dma_buf_attachment *attachment)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a = attachment->priv;

	mutex_lock(&buffer->lock);
	list_del(&a->list);
	mutex_unlock(&buffer->lock);

	sg_free_table(&a->table);
	kfree(a);
}

static struct sg_table *cma_heap_map_dma_buf(struct dma_buf_attachment *attachment,
					     enum dma_data_direction direction)
{
	struct dma_heap_attachment *a = attachment->priv;
	struct sg_table *table = &a->table;
	int ret;

	ret = dma_map_sgtable(attachment->dev, table, direction, 0);
	if (ret)
		return ERR_PTR(-ENOMEM);
	a->mapped = true;
	return table;
}

static void cma_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
				   struct sg_table *table,
				   enum dma_data_direction direction)
{
	struct dma_heap_attachment *a = attachment->priv;

	a->mapped = false;
	dma_unmap_sgtable(attachment->dev, table, direction, 0);
}

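/*
 * For reference: an importing device driver reaches the attach/map callbacks
 * above through the core dma-buf API. A minimal sketch, assuming the importer
 * already has a dma-buf fd and a struct device (error handling omitted; each
 * call below can fail and must be checked):
 *
 *	struct dma_buf *buf = dma_buf_get(fd);
 *	struct dma_buf_attachment *att = dma_buf_attach(buf, dev);
 *	struct sg_table *sgt = dma_buf_map_attachment(att, DMA_BIDIRECTIONAL);
 *
 *	// ... program the device with the addresses in sgt ...
 *
 *	dma_buf_unmap_attachment(att, sgt, DMA_BIDIRECTIONAL);
 *	dma_buf_detach(buf, att);
 *	dma_buf_put(buf);
 */
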
static int cma_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
					     enum dma_data_direction direction)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;

	if (buffer->vmap_cnt)
		invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);

	mutex_lock(&buffer->lock);
	list_for_each_entry(a, &buffer->attachments, list) {
		if (!a->mapped)
			continue;
		dma_sync_sgtable_for_cpu(a->dev, &a->table, direction);
	}
	mutex_unlock(&buffer->lock);

	return 0;
}

static int cma_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
					   enum dma_data_direction direction)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	struct dma_heap_attachment *a;

	if (buffer->vmap_cnt)
		flush_kernel_vmap_range(buffer->vaddr, buffer->len);

	mutex_lock(&buffer->lock);
	list_for_each_entry(a, &buffer->attachments, list) {
		if (!a->mapped)
			continue;
		dma_sync_sgtable_for_device(a->dev, &a->table, direction);
	}
	mutex_unlock(&buffer->lock);

	return 0;
}

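/*
 * The begin/end_cpu_access callbacks above are reachable from user space via
 * DMA_BUF_IOCTL_SYNC on the exported fd (uapi <linux/dma-buf.h>), bracketing
 * CPU access to the buffer. An illustrative user-space sketch, assuming
 * buf_fd is a dma-buf fd exported by this heap:
 *
 *	struct dma_buf_sync sync = { .flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_RW };
 *
 *	ioctl(buf_fd, DMA_BUF_IOCTL_SYNC, &sync);
 *	// ... CPU reads/writes through the mmap()ed buffer ...
 *	sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_RW;
 *	ioctl(buf_fd, DMA_BUF_IOCTL_SYNC, &sync);
 */
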
static vm_fault_t cma_heap_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct cma_heap_buffer *buffer = vma->vm_private_data;

	/* pgoff is a zero-based index, so equal to pagecount is already out of range */
	if (vmf->pgoff >= buffer->pagecount)
		return VM_FAULT_SIGBUS;

	vmf->page = buffer->pages[vmf->pgoff];
	get_page(vmf->page);

	return 0;
}

static const struct vm_operations_struct dma_heap_vm_ops = {
	.fault = cma_heap_vm_fault,
};

static int cma_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;

	if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
		return -EINVAL;

	vma->vm_ops = &dma_heap_vm_ops;
	vma->vm_private_data = buffer;

	return 0;
}

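/*
 * User space maps the exported buffer by mmap()ing the dma-buf fd with
 * MAP_SHARED (required by the VM_SHARED check above); pages are then faulted
 * in on demand through cma_heap_vm_fault(). Illustrative call, assuming
 * buf_fd and len come from the allocating application:
 *
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, buf_fd, 0);
 */
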
static void *cma_heap_do_vmap(struct cma_heap_buffer *buffer)
{
	void *vaddr;

	vaddr = vmap(buffer->pages, buffer->pagecount, VM_MAP, PAGE_KERNEL);
	if (!vaddr)
		return ERR_PTR(-ENOMEM);

	return vaddr;
}

static int cma_heap_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	void *vaddr;
	int ret = 0;

	mutex_lock(&buffer->lock);
	if (buffer->vmap_cnt) {
		buffer->vmap_cnt++;
		dma_buf_map_set_vaddr(map, buffer->vaddr);
		goto out;
	}

	vaddr = cma_heap_do_vmap(buffer);
	if (IS_ERR(vaddr)) {
		ret = PTR_ERR(vaddr);
		goto out;
	}
	buffer->vaddr = vaddr;
	buffer->vmap_cnt++;
	dma_buf_map_set_vaddr(map, buffer->vaddr);
out:
	mutex_unlock(&buffer->lock);

	return ret;
}

static void cma_heap_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	if (!--buffer->vmap_cnt) {
		vunmap(buffer->vaddr);
		buffer->vaddr = NULL;
	}
	mutex_unlock(&buffer->lock);
	dma_buf_map_clear(map);
}

static void cma_heap_dma_buf_release(struct dma_buf *dmabuf)
{
	struct cma_heap_buffer *buffer = dmabuf->priv;
	struct cma_heap *cma_heap = buffer->heap;

	if (buffer->vmap_cnt > 0) {
		WARN(1, "%s: buffer still mapped in the kernel\n", __func__);
		vunmap(buffer->vaddr);
		buffer->vaddr = NULL;
	}

	/* free the page array before releasing the CMA allocation */
	kfree(buffer->pages);
	cma_release(cma_heap->cma, buffer->cma_pages, buffer->pagecount);
	kfree(buffer);
}

static const struct dma_buf_ops cma_heap_buf_ops = {
	.attach = cma_heap_attach,
	.detach = cma_heap_detach,
	.map_dma_buf = cma_heap_map_dma_buf,
	.unmap_dma_buf = cma_heap_unmap_dma_buf,
	.begin_cpu_access = cma_heap_dma_buf_begin_cpu_access,
	.end_cpu_access = cma_heap_dma_buf_end_cpu_access,
	.mmap = cma_heap_mmap,
	.vmap = cma_heap_vmap,
	.vunmap = cma_heap_vunmap,
	.release = cma_heap_dma_buf_release,
};

static struct dma_buf *cma_heap_allocate(struct dma_heap *heap,
					 unsigned long len,
					 unsigned long fd_flags,
					 unsigned long heap_flags)
{
	struct cma_heap *cma_heap = dma_heap_get_drvdata(heap);
	struct cma_heap_buffer *buffer;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
	size_t size = PAGE_ALIGN(len);
	pgoff_t pagecount = size >> PAGE_SHIFT;
	unsigned long align = get_order(size);
	struct page *cma_pages;
	struct dma_buf *dmabuf;
	int ret = -ENOMEM;
	pgoff_t pg;

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&buffer->attachments);
	mutex_init(&buffer->lock);
	buffer->len = size;

	if (align > CONFIG_CMA_ALIGNMENT)
		align = CONFIG_CMA_ALIGNMENT;

	cma_pages = cma_alloc(cma_heap->cma, pagecount, align, false);
	if (!cma_pages)
		goto free_buffer;

	/* Clear the cma pages */
	if (PageHighMem(cma_pages)) {
		unsigned long nr_clear_pages = pagecount;
		struct page *page = cma_pages;

		while (nr_clear_pages > 0) {
			void *vaddr = kmap_atomic(page);

			memset(vaddr, 0, PAGE_SIZE);
			kunmap_atomic(vaddr);
			/*
			 * Avoid wasting time zeroing memory if the process
			 * has been killed by SIGKILL.
			 */
			if (fatal_signal_pending(current))
				goto free_cma;
			page++;
			nr_clear_pages--;
		}
	} else {
		memset(page_address(cma_pages), 0, size);
	}

	buffer->pages = kmalloc_array(pagecount, sizeof(*buffer->pages), GFP_KERNEL);
	if (!buffer->pages) {
		ret = -ENOMEM;
		goto free_cma;
	}

	for (pg = 0; pg < pagecount; pg++)
		buffer->pages[pg] = &cma_pages[pg];

	buffer->cma_pages = cma_pages;
	buffer->heap = cma_heap;
	buffer->pagecount = pagecount;

	/* create the dmabuf */
	exp_info.exp_name = dma_heap_get_name(heap);
	exp_info.ops = &cma_heap_buf_ops;
	exp_info.size = buffer->len;
	exp_info.flags = fd_flags;
	exp_info.priv = buffer;
	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf)) {
		ret = PTR_ERR(dmabuf);
		goto free_pages;
	}

	return dmabuf;

free_pages:
	kfree(buffer->pages);
free_cma:
	cma_release(cma_heap->cma, cma_pages, pagecount);
free_buffer:
	kfree(buffer);

	return ERR_PTR(ret);
}

static const struct dma_heap_ops cma_heap_ops = {
	.allocate = cma_heap_allocate,
};

static int __add_cma_heap(struct cma *cma, void *data)
{
	struct cma_heap *cma_heap;
	struct dma_heap_export_info exp_info;

	cma_heap = kzalloc(sizeof(*cma_heap), GFP_KERNEL);
	if (!cma_heap)
		return -ENOMEM;
	cma_heap->cma = cma;

	exp_info.name = cma_get_name(cma);
	exp_info.ops = &cma_heap_ops;
	exp_info.priv = cma_heap;

	cma_heap->heap = dma_heap_add(&exp_info);
	if (IS_ERR(cma_heap->heap)) {
		int ret = PTR_ERR(cma_heap->heap);

		kfree(cma_heap);
		return ret;
	}

	return 0;
}

static int add_default_cma_heap(void)
{
	struct cma *default_cma = dev_get_cma_area(NULL);
	int ret = 0;

	if (default_cma)
		ret = __add_cma_heap(default_cma, NULL);

	return ret;
}
module_init(add_default_cma_heap);
MODULE_DESCRIPTION("DMA-BUF CMA Heap");
MODULE_LICENSE("GPL v2");
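
/*
 * For reference: user space allocates from this heap through the
 * /dev/dma_heap/<name> character device registered by dma_heap_add() above,
 * where <name> comes from cma_get_name() of the default CMA area. A minimal,
 * illustrative sketch using the uapi <linux/dma-heap.h> (the "reserved" heap
 * name and buffer_size are assumptions, not part of this driver):
 *
 *	struct dma_heap_allocation_data data = {
 *		.len = buffer_size,
 *		.fd_flags = O_RDWR | O_CLOEXEC,
 *	};
 *	int heap_fd = open("/dev/dma_heap/reserved", O_RDONLY | O_CLOEXEC);
 *
 *	ioctl(heap_fd, DMA_HEAP_IOCTL_ALLOC, &data);
 *	// data.fd now holds a dma-buf fd backed by physically contiguous memory
 */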