// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
 * Copyright (c) 2020 Intel Corporation. All rights reserved.
 */

#include <linux/dma-buf.h>
#include <linux/dma-resv.h>
#include <linux/dma-mapping.h>

#include "uverbs.h"
int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf)
{
	struct sg_table *sgt;
	struct scatterlist *sg;
	struct dma_fence *fence;
	unsigned long start, end, cur = 0;
	unsigned int nmap = 0;
	int i;

	dma_resv_assert_held(umem_dmabuf->attach->dmabuf->resv);

	if (umem_dmabuf->sgt)
		goto wait_fence;

	sgt = dma_buf_map_attachment(umem_dmabuf->attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt))
		return PTR_ERR(sgt);

	/* modify the sg list in-place to match umem address and length */

	start = ALIGN_DOWN(umem_dmabuf->umem.address, PAGE_SIZE);
	end = ALIGN(umem_dmabuf->umem.address + umem_dmabuf->umem.length,
		    PAGE_SIZE);
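	/*
	 * Walk the mapped entries: count those that overlap the aligned umem
	 * range and trim the first and last overlapping entries, recording
	 * the offset and trim so that unmap can restore the original list.
	 */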
	for_each_sgtable_dma_sg(sgt, sg, i) {
		if (start < cur + sg_dma_len(sg) && cur < end)
			nmap++;
		if (cur <= start && start < cur + sg_dma_len(sg)) {
			unsigned long offset = start - cur;

			umem_dmabuf->first_sg = sg;
			umem_dmabuf->first_sg_offset = offset;
			sg_dma_address(sg) += offset;
			sg_dma_len(sg) -= offset;
			cur += offset;
		}
		if (cur < end && end <= cur + sg_dma_len(sg)) {
			unsigned long trim = cur + sg_dma_len(sg) - end;

			umem_dmabuf->last_sg = sg;
			umem_dmabuf->last_sg_trim = trim;
			sg_dma_len(sg) -= trim;
			break;
		}
		cur += sg_dma_len(sg);
	}
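
	/* publish the trimmed sg list through the umem */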
	umem_dmabuf->umem.sg_head.sgl = umem_dmabuf->first_sg;
	umem_dmabuf->umem.sg_head.nents = nmap;
	umem_dmabuf->umem.nmap = nmap;
	umem_dmabuf->sgt = sgt;

wait_fence:
	/*
	 * Although the sg list is valid now, the content of the pages
	 * may not be up to date. Wait for the exporter to finish
	 * the migration.
	 */
	fence = dma_resv_get_excl(umem_dmabuf->attach->dmabuf->resv);
	if (fence)
		return dma_fence_wait(fence, false);

	return 0;
}
EXPORT_SYMBOL(ib_umem_dmabuf_map_pages);
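
/*
 * Undo ib_umem_dmabuf_map_pages(): restore the sg entries that were
 * trimmed and unmap the attachment. The caller must hold the dma-buf
 * reservation lock.
 */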
void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf)
{
	dma_resv_assert_held(umem_dmabuf->attach->dmabuf->resv);

	if (!umem_dmabuf->sgt)
		return;

	/* restore the original sg list */
	if (umem_dmabuf->first_sg) {
		sg_dma_address(umem_dmabuf->first_sg) -=
			umem_dmabuf->first_sg_offset;
		sg_dma_len(umem_dmabuf->first_sg) +=
			umem_dmabuf->first_sg_offset;
		umem_dmabuf->first_sg = NULL;
		umem_dmabuf->first_sg_offset = 0;
	}
	if (umem_dmabuf->last_sg) {
		sg_dma_len(umem_dmabuf->last_sg) +=
			umem_dmabuf->last_sg_trim;
		umem_dmabuf->last_sg = NULL;
		umem_dmabuf->last_sg_trim = 0;
	}

	dma_buf_unmap_attachment(umem_dmabuf->attach, umem_dmabuf->sgt,
				 DMA_BIDIRECTIONAL);

	umem_dmabuf->sgt = NULL;
}
EXPORT_SYMBOL(ib_umem_dmabuf_unmap_pages);
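
/*
 * Attach @device to the dma-buf identified by @fd and build an ib_umem
 * covering @size bytes at @offset within it. The importer must provide a
 * move_notify callback in @ops; the pages are mapped later via
 * ib_umem_dmabuf_map_pages() under the dma-buf reservation lock.
 */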
struct ib_umem_dmabuf *ib_umem_dmabuf_get(struct ib_device *device,
					  unsigned long offset, size_t size,
					  int fd, int access,
					  const struct dma_buf_attach_ops *ops)
{
	struct dma_buf *dmabuf;
	struct ib_umem_dmabuf *umem_dmabuf;
	struct ib_umem *umem;
	unsigned long end;
	struct ib_umem_dmabuf *ret = ERR_PTR(-EINVAL);

	if (check_add_overflow(offset, (unsigned long)size, &end))
		return ret;

	if (unlikely(!ops || !ops->move_notify))
		return ret;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return ERR_CAST(dmabuf);

	if (dmabuf->size < end)
		goto out_release_dmabuf;

	umem_dmabuf = kzalloc(sizeof(*umem_dmabuf), GFP_KERNEL);
	if (!umem_dmabuf) {
		ret = ERR_PTR(-ENOMEM);
		goto out_release_dmabuf;
	}

	umem = &umem_dmabuf->umem;
	umem->ibdev = device;
	umem->length = size;
	umem->address = offset;
	umem->writable = ib_access_writable(access);
	umem->is_dmabuf = 1;

	if (!ib_umem_num_pages(umem))
		goto out_free_umem;
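
	/*
	 * Use a dynamic attachment so the exporter may relocate the buffer
	 * while it is attached; it signals this through ops->move_notify and
	 * the importer remaps with ib_umem_dmabuf_map_pages() afterwards.
	 */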
	umem_dmabuf->attach = dma_buf_dynamic_attach(
					dmabuf,
					device->dma_device,
					ops,
					umem_dmabuf);
	if (IS_ERR(umem_dmabuf->attach)) {
		ret = ERR_CAST(umem_dmabuf->attach);
		goto out_free_umem;
	}
	return umem_dmabuf;

out_free_umem:
	kfree(umem_dmabuf);
out_release_dmabuf:
	dma_buf_put(dmabuf);
	return ret;
}
EXPORT_SYMBOL(ib_umem_dmabuf_get);
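
/*
 * Release an ib_umem_dmabuf returned by ib_umem_dmabuf_get(): unmap any
 * remaining pages under the reservation lock, detach from the dma-buf and
 * drop the dma-buf reference.
 */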
void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf)
{
	struct dma_buf *dmabuf = umem_dmabuf->attach->dmabuf;

	dma_resv_lock(dmabuf->resv, NULL);
	ib_umem_dmabuf_unmap_pages(umem_dmabuf);
	dma_resv_unlock(dmabuf->resv);

	dma_buf_detach(dmabuf, umem_dmabuf->attach);
	dma_buf_put(dmabuf);
	kfree(umem_dmabuf);
}
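
/*
 * Typical importer flow (a sketch, not part of this file): a driver calls
 * ib_umem_dmabuf_get() at MR registration, then takes the dma-buf
 * reservation lock and calls ib_umem_dmabuf_map_pages() before programming
 * its translation tables; in its move_notify callback it stops device
 * access and calls ib_umem_dmabuf_unmap_pages(); at deregistration it
 * calls ib_umem_dmabuf_release().
 */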