/******************************************************************************
 * gntdev.c
 *
 * Device for accessing (in user-space) pages that have been granted by other
 * domains.
 *
 * Copyright (c) 2006-2007, D G Murray.
 *           (c) 2009 Gerd Hoffmann <kraxel@redhat.com>
 *           (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#undef DEBUG

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/refcount.h>
#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
#include <linux/of_device.h>
#endif

#include <xen/xen.h>
#include <xen/grant_table.h>
#include <xen/balloon.h>
#include <xen/gntdev.h>
#include <xen/events.h>
#include <xen/page.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include "gntdev-common.h"
#ifdef CONFIG_XEN_GNTDEV_DMABUF
#include "gntdev-dmabuf.h"
#endif

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Derek G. Murray <Derek.Murray@cl.cam.ac.uk>, "
	      "Gerd Hoffmann <kraxel@redhat.com>");
MODULE_DESCRIPTION("User-space granted page access driver");
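
/*
 * Illustrative user-space lifecycle for this device (a sketch, not part of
 * the driver itself; it assumes the UAPI from include/uapi/xen/gntdev.h, a
 * device node at /dev/xen/gntdev, and domid/gref values communicated out of
 * band by the granting domain):
 *
 *	int fd = open("/dev/xen/gntdev", O_RDWR);
 *	struct ioctl_gntdev_map_grant_ref op = { .count = 1 };
 *	op.refs[0].domid = remote_domid;
 *	op.refs[0].ref   = gref;
 *	ioctl(fd, IOCTL_GNTDEV_MAP_GRANT_REF, &op);
 *	void *addr = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, op.index);
 *	...
 *	munmap(addr, 4096);
 *	struct ioctl_gntdev_unmap_grant_ref uop = {
 *		.index = op.index, .count = 1 };
 *	ioctl(fd, IOCTL_GNTDEV_UNMAP_GRANT_REF, &uop);
 */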

static int limit = 1024*1024;
module_param(limit, int, 0644);
MODULE_PARM_DESC(limit, "Maximum number of grants that may be mapped by "
		"the gntdev device");

static atomic_t pages_mapped = ATOMIC_INIT(0);

static int use_ptemod;
#define populate_freeable_maps use_ptemod

static int unmap_grant_pages(struct gntdev_grant_map *map,
			     int offset, int pages);

static struct miscdevice gntdev_miscdev;

/* ------------------------------------------------------------------ */

bool gntdev_account_mapped_pages(int count)
{
	return atomic_add_return(count, &pages_mapped) > limit;
}

static void gntdev_print_maps(struct gntdev_priv *priv,
			      char *text, int text_index)
{
#ifdef DEBUG
	struct gntdev_grant_map *map;

	pr_debug("%s: maps list (priv %p)\n", __func__, priv);
	list_for_each_entry(map, &priv->maps, next)
		pr_debug("  index %2d, count %2d %s\n",
		       map->index, map->count,
		       map->index == text_index && text ? text : "");
#endif
}

static void gntdev_free_map(struct gntdev_grant_map *map)
{
	if (map == NULL)
		return;

#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
	if (map->dma_vaddr) {
		struct gnttab_dma_alloc_args args;

		args.dev = map->dma_dev;
		args.coherent = !!(map->dma_flags & GNTDEV_DMA_FLAG_COHERENT);
		args.nr_pages = map->count;
		args.pages = map->pages;
		args.frames = map->frames;
		args.vaddr = map->dma_vaddr;
		args.dev_bus_addr = map->dma_bus_addr;

		gnttab_dma_free_pages(&args);
	} else
#endif
	if (map->pages)
		gnttab_free_pages(map->count, map->pages);

#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
	kfree(map->frames);
#endif
	kfree(map->pages);
	kfree(map->grants);
	kfree(map->map_ops);
	kfree(map->unmap_ops);
	kfree(map->kmap_ops);
	kfree(map->kunmap_ops);
	kfree(map);
}

struct gntdev_grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count,
					  int dma_flags)
{
	struct gntdev_grant_map *add;
	int i;

	add = kzalloc(sizeof(*add), GFP_KERNEL);
	if (NULL == add)
		return NULL;

	add->grants    = kcalloc(count, sizeof(add->grants[0]), GFP_KERNEL);
	add->map_ops   = kcalloc(count, sizeof(add->map_ops[0]), GFP_KERNEL);
	add->unmap_ops = kcalloc(count, sizeof(add->unmap_ops[0]), GFP_KERNEL);
	add->kmap_ops  = kcalloc(count, sizeof(add->kmap_ops[0]), GFP_KERNEL);
	add->kunmap_ops = kcalloc(count, sizeof(add->kunmap_ops[0]), GFP_KERNEL);
	add->pages     = kcalloc(count, sizeof(add->pages[0]), GFP_KERNEL);
	if (NULL == add->grants    ||
	    NULL == add->map_ops   ||
	    NULL == add->unmap_ops ||
	    NULL == add->kmap_ops  ||
	    NULL == add->kunmap_ops ||
	    NULL == add->pages)
		goto err;

#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
	add->dma_flags = dma_flags;

	/*
	 * Check if this mapping is requested to be backed
	 * by a DMA buffer.
	 */
	if (dma_flags & (GNTDEV_DMA_FLAG_WC | GNTDEV_DMA_FLAG_COHERENT)) {
		struct gnttab_dma_alloc_args args;

		add->frames = kcalloc(count, sizeof(add->frames[0]),
				      GFP_KERNEL);
		if (!add->frames)
			goto err;

		/* Remember the device, so we can free DMA memory. */
		add->dma_dev = priv->dma_dev;

		args.dev = priv->dma_dev;
		args.coherent = !!(dma_flags & GNTDEV_DMA_FLAG_COHERENT);
		args.nr_pages = count;
		args.pages = add->pages;
		args.frames = add->frames;

		if (gnttab_dma_alloc_pages(&args))
			goto err;

		add->dma_vaddr = args.vaddr;
		add->dma_bus_addr = args.dev_bus_addr;
	} else
#endif
	if (gnttab_alloc_pages(count, add->pages))
		goto err;

	for (i = 0; i < count; i++) {
		add->map_ops[i].handle = -1;
		add->unmap_ops[i].handle = -1;
		add->kmap_ops[i].handle = -1;
		add->kunmap_ops[i].handle = -1;
	}

	add->index = 0;
	add->count = count;
	refcount_set(&add->users, 1);

	return add;

err:
	gntdev_free_map(add);
	return NULL;
}

void gntdev_add_map(struct gntdev_priv *priv, struct gntdev_grant_map *add)
{
	struct gntdev_grant_map *map;

	list_for_each_entry(map, &priv->maps, next) {
		if (add->index + add->count < map->index) {
			list_add_tail(&add->next, &map->next);
			goto done;
		}
		add->index = map->index + map->count;
	}
	list_add_tail(&add->next, &priv->maps);

done:
	gntdev_print_maps(priv, "[new]", add->index);
}
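
/*
 * Worked example for the insertion above (hypothetical indexes): with
 * existing maps at [0, +2) and [5, +1), a new two-page map first slides
 * past the [0, +2) entry (its index becomes 2), then satisfies
 * 2 + 2 < 5 and is linked in before [5, +1), keeping the list sorted
 * by index.
 */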

static struct gntdev_grant_map *gntdev_find_map_index(struct gntdev_priv *priv,
						      int index, int count)
{
	struct gntdev_grant_map *map;

	list_for_each_entry(map, &priv->maps, next) {
		if (map->index != index)
			continue;
		if (count && map->count != count)
			continue;
		return map;
	}
	return NULL;
}

void gntdev_put_map(struct gntdev_priv *priv, struct gntdev_grant_map *map)
{
	if (!map)
		return;

	if (!refcount_dec_and_test(&map->users))
		return;

	atomic_sub(map->count, &pages_mapped);

	if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT) {
		notify_remote_via_evtchn(map->notify.event);
		evtchn_put(map->notify.event);
	}

	if (populate_freeable_maps && priv) {
		mutex_lock(&priv->lock);
		list_del(&map->next);
		mutex_unlock(&priv->lock);
	}

	if (map->pages && !use_ptemod)
		unmap_grant_pages(map, 0, map->count);
	gntdev_free_map(map);
}

/* ------------------------------------------------------------------ */

static int find_grant_ptes(pte_t *pte, unsigned long addr, void *data)
{
	struct gntdev_grant_map *map = data;
	unsigned int pgnr = (addr - map->vma->vm_start) >> PAGE_SHIFT;
	int flags = map->flags | GNTMAP_application_map | GNTMAP_contains_pte;
	u64 pte_maddr;

	BUG_ON(pgnr >= map->count);
	pte_maddr = arbitrary_virt_to_machine(pte).maddr;

	/*
	 * Set the PTE as special to force get_user_pages_fast() to fall
	 * back to the slow path.  If this is not supported as part of
	 * the grant map, it will be done afterwards.
	 */
	if (xen_feature(XENFEAT_gnttab_map_avail_bits))
		flags |= (1 << _GNTMAP_guest_avail0);

	gnttab_set_map_op(&map->map_ops[pgnr], pte_maddr, flags,
			  map->grants[pgnr].ref,
			  map->grants[pgnr].domid);
	gnttab_set_unmap_op(&map->unmap_ops[pgnr], pte_maddr, flags,
			    -1 /* handle */);
	return 0;
}

#ifdef CONFIG_X86
static int set_grant_ptes_as_special(pte_t *pte, unsigned long addr, void *data)
{
	set_pte_at(current->mm, addr, pte, pte_mkspecial(*pte));
	return 0;
}
#endif

int gntdev_map_grant_pages(struct gntdev_grant_map *map)
{
	int i, err = 0;

	if (!use_ptemod) {
		/* Note: it could already be mapped */
		if (map->map_ops[0].handle != -1)
			return 0;
		for (i = 0; i < map->count; i++) {
			unsigned long addr = (unsigned long)
				pfn_to_kaddr(page_to_pfn(map->pages[i]));
			gnttab_set_map_op(&map->map_ops[i], addr, map->flags,
					  map->grants[i].ref,
					  map->grants[i].domid);
			gnttab_set_unmap_op(&map->unmap_ops[i], addr,
					    map->flags, -1 /* handle */);
		}
	} else {
		/*
		 * Setup the map_ops corresponding to the pte entries pointing
		 * to the kernel linear addresses of the struct pages.
		 * These ptes are completely different from the user ptes dealt
		 * with by find_grant_ptes.
		 */
		for (i = 0; i < map->count; i++) {
			unsigned long address = (unsigned long)
				pfn_to_kaddr(page_to_pfn(map->pages[i]));
			BUG_ON(PageHighMem(map->pages[i]));

			gnttab_set_map_op(&map->kmap_ops[i], address,
					  map->flags | GNTMAP_host_map,
					  map->grants[i].ref,
					  map->grants[i].domid);
			gnttab_set_unmap_op(&map->kunmap_ops[i], address,
					    map->flags | GNTMAP_host_map, -1);
		}
	}

	pr_debug("map %d+%d\n", map->index, map->count);
	err = gnttab_map_refs(map->map_ops, use_ptemod ? map->kmap_ops : NULL,
			map->pages, map->count);
	if (err)
		return err;

	for (i = 0; i < map->count; i++) {
		if (map->map_ops[i].status) {
			err = -EINVAL;
			continue;
		}

		map->unmap_ops[i].handle = map->map_ops[i].handle;
		if (use_ptemod)
			map->kunmap_ops[i].handle = map->kmap_ops[i].handle;
#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
		else if (map->dma_vaddr) {
			unsigned long bfn;

			bfn = pfn_to_bfn(page_to_pfn(map->pages[i]));
			map->unmap_ops[i].dev_bus_addr = __pfn_to_phys(bfn);
		}
#endif
	}
	return err;
}

static int __unmap_grant_pages(struct gntdev_grant_map *map, int offset,
			       int pages)
{
	int i, err = 0;
	struct gntab_unmap_queue_data unmap_data;

	if (map->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) {
		int pgno = (map->notify.addr >> PAGE_SHIFT);

		if (pgno >= offset && pgno < offset + pages) {
			/* No need for kmap, pages are in lowmem */
			uint8_t *tmp = pfn_to_kaddr(page_to_pfn(map->pages[pgno]));

			tmp[map->notify.addr & (PAGE_SIZE-1)] = 0;
			map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE;
		}
	}

	unmap_data.unmap_ops = map->unmap_ops + offset;
	unmap_data.kunmap_ops = use_ptemod ? map->kunmap_ops + offset : NULL;
	unmap_data.pages = map->pages + offset;
	unmap_data.count = pages;

	err = gnttab_unmap_refs_sync(&unmap_data);
	if (err)
		return err;

	for (i = 0; i < pages; i++) {
		if (map->unmap_ops[offset+i].status)
			err = -EINVAL;
		pr_debug("unmap handle=%d st=%d\n",
			map->unmap_ops[offset+i].handle,
			map->unmap_ops[offset+i].status);
		map->unmap_ops[offset+i].handle = -1;
	}
	return err;
}

static int unmap_grant_pages(struct gntdev_grant_map *map, int offset,
			     int pages)
{
	int range, err = 0;

	pr_debug("unmap %d+%d [%d+%d]\n", map->index, map->count, offset, pages);

	/* It is possible the requested range will have a "hole" where we
	 * already unmapped some of the grants. Only unmap valid ranges.
	 */
	while (pages && !err) {
		while (pages && map->unmap_ops[offset].handle == -1) {
			offset++;
			pages--;
		}
		range = 0;
		while (range < pages) {
			if (map->unmap_ops[offset+range].handle == -1)
				break;
			range++;
		}
		err = __unmap_grant_pages(map, offset, range);
		offset += range;
		pages -= range;
	}

	return err;
}
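
/*
 * Worked example for the loop above (hypothetical handle values): with
 * unmap_ops[].handle = { 7, -1, -1, 8, 9 } and a request for offset 0,
 * pages 5, the already-unmapped hole at indexes 1-2 is skipped and
 * __unmap_grant_pages() is called twice, once for [0, 1) and once
 * for [3, 5).
 */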

/* ------------------------------------------------------------------ */

static void gntdev_vma_open(struct vm_area_struct *vma)
{
	struct gntdev_grant_map *map = vma->vm_private_data;

	pr_debug("gntdev_vma_open %p\n", vma);
	refcount_inc(&map->users);
}

static void gntdev_vma_close(struct vm_area_struct *vma)
{
	struct gntdev_grant_map *map = vma->vm_private_data;
	struct file *file = vma->vm_file;
	struct gntdev_priv *priv = file->private_data;

	pr_debug("gntdev_vma_close %p\n", vma);
	if (use_ptemod) {
		/* It is possible that an mmu notifier could be running
		 * concurrently, so take priv->lock to ensure that the vma won't
		 * vanish during the unmap_grant_pages call, since we will
		 * spin here until that completes. Such a concurrent call will
		 * not do any unmapping, since that has been done prior to
		 * closing the vma, but it may still iterate the unmap_ops list.
		 */
		mutex_lock(&priv->lock);
		map->vma = NULL;
		mutex_unlock(&priv->lock);
	}
	vma->vm_private_data = NULL;
	gntdev_put_map(priv, map);
}

static struct page *gntdev_vma_find_special_page(struct vm_area_struct *vma,
						 unsigned long addr)
{
	struct gntdev_grant_map *map = vma->vm_private_data;

	return map->pages[(addr - map->pages_vm_start) >> PAGE_SHIFT];
}

static const struct vm_operations_struct gntdev_vmops = {
	.open = gntdev_vma_open,
	.close = gntdev_vma_close,
	.find_special_page = gntdev_vma_find_special_page,
};

/* ------------------------------------------------------------------ */

static bool in_range(struct gntdev_grant_map *map,
		     unsigned long start, unsigned long end)
{
	if (!map->vma)
		return false;
	if (map->vma->vm_start >= end)
		return false;
	if (map->vma->vm_end <= start)
		return false;

	return true;
}

static int unmap_if_in_range(struct gntdev_grant_map *map,
			     unsigned long start, unsigned long end,
			     bool blockable)
{
	unsigned long mstart, mend;
	int err;

	if (!in_range(map, start, end))
		return 0;

	if (!blockable)
		return -EAGAIN;

	mstart = max(start, map->vma->vm_start);
	mend = min(end, map->vma->vm_end);
	pr_debug("map %d+%d (%lx %lx), range %lx %lx, mrange %lx %lx\n",
			map->index, map->count,
			map->vma->vm_start, map->vma->vm_end,
			start, end, mstart, mend);
	err = unmap_grant_pages(map,
				(mstart - map->vma->vm_start) >> PAGE_SHIFT,
				(mend - mstart) >> PAGE_SHIFT);
	WARN_ON(err);

	return 0;
}

static int mn_invl_range_start(struct mmu_notifier *mn,
			       const struct mmu_notifier_range *range)
{
	struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn);
	struct gntdev_grant_map *map;
	int ret = 0;

	if (mmu_notifier_range_blockable(range))
		mutex_lock(&priv->lock);
	else if (!mutex_trylock(&priv->lock))
		return -EAGAIN;

	list_for_each_entry(map, &priv->maps, next) {
		ret = unmap_if_in_range(map, range->start, range->end,
					mmu_notifier_range_blockable(range));
		if (ret)
			goto out_unlock;
	}
	list_for_each_entry(map, &priv->freeable_maps, next) {
		ret = unmap_if_in_range(map, range->start, range->end,
					mmu_notifier_range_blockable(range));
		if (ret)
			goto out_unlock;
	}

out_unlock:
	mutex_unlock(&priv->lock);

	return ret;
}
static void mn_release(struct mmu_notifier *mn,
		       struct mm_struct *mm)
{
	struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn);
	struct gntdev_grant_map *map;
	int err;

	mutex_lock(&priv->lock);
	list_for_each_entry(map, &priv->maps, next) {
		if (!map->vma)
			continue;
		pr_debug("map %d+%d (%lx %lx)\n",
				map->index, map->count,
				map->vma->vm_start, map->vma->vm_end);
		err = unmap_grant_pages(map, /* offset */ 0, map->count);
		WARN_ON(err);
	}
	list_for_each_entry(map, &priv->freeable_maps, next) {
		if (!map->vma)
			continue;
		pr_debug("map %d+%d (%lx %lx)\n",
				map->index, map->count,
				map->vma->vm_start, map->vma->vm_end);
		err = unmap_grant_pages(map, /* offset */ 0, map->count);
		WARN_ON(err);
	}
	mutex_unlock(&priv->lock);
}

static const struct mmu_notifier_ops gntdev_mmu_ops = {
	.release                = mn_release,
	.invalidate_range_start = mn_invl_range_start,
};

/* ------------------------------------------------------------------ */

static int gntdev_open(struct inode *inode, struct file *flip)
{
	struct gntdev_priv *priv;
	int ret = 0;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	INIT_LIST_HEAD(&priv->maps);
	INIT_LIST_HEAD(&priv->freeable_maps);
	mutex_init(&priv->lock);

#ifdef CONFIG_XEN_GNTDEV_DMABUF
	priv->dmabuf_priv = gntdev_dmabuf_init(flip);
	if (IS_ERR(priv->dmabuf_priv)) {
		ret = PTR_ERR(priv->dmabuf_priv);
		kfree(priv);
		return ret;
	}
#endif

	if (use_ptemod) {
		priv->mm = get_task_mm(current);
		if (!priv->mm) {
			kfree(priv);
			return -ENOMEM;
		}
		priv->mn.ops = &gntdev_mmu_ops;
		ret = mmu_notifier_register(&priv->mn, priv->mm);
		mmput(priv->mm);
	}

	if (ret) {
		kfree(priv);
		return ret;
	}

	flip->private_data = priv;
#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
	priv->dma_dev = gntdev_miscdev.this_device;

	/*
	 * The device is not spawned from a device tree, so
	 * arch_setup_dma_ops is not called, thus leaving the device with
	 * dummy DMA ops.  Fix this by calling of_dma_configure() with a
	 * NULL node to set default DMA ops.
	 */
	of_dma_configure(priv->dma_dev, NULL, true);
#endif
	pr_debug("priv %p\n", priv);

	return 0;
}
static int gntdev_release(struct inode *inode, struct file *flip)
{
	struct gntdev_priv *priv = flip->private_data;
	struct gntdev_grant_map *map;

	pr_debug("priv %p\n", priv);

	mutex_lock(&priv->lock);
	while (!list_empty(&priv->maps)) {
		map = list_entry(priv->maps.next,
				 struct gntdev_grant_map, next);
		list_del(&map->next);
		gntdev_put_map(NULL /* already removed */, map);
	}
	WARN_ON(!list_empty(&priv->freeable_maps));
	mutex_unlock(&priv->lock);

#ifdef CONFIG_XEN_GNTDEV_DMABUF
	gntdev_dmabuf_fini(priv->dmabuf_priv);
#endif

	if (use_ptemod)
		mmu_notifier_unregister(&priv->mn, priv->mm);

	kfree(priv);
	return 0;
}
static long gntdev_ioctl_map_grant_ref(struct gntdev_priv *priv,
				       struct ioctl_gntdev_map_grant_ref __user *u)
{
	struct ioctl_gntdev_map_grant_ref op;
	struct gntdev_grant_map *map;
	int err;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;
	pr_debug("priv %p, add %d\n", priv, op.count);
	if (unlikely(op.count <= 0))
		return -EINVAL;

	err = -ENOMEM;
	map = gntdev_alloc_map(priv, op.count, 0 /* This is not a dma-buf. */);
	if (!map)
		return err;

	if (unlikely(gntdev_account_mapped_pages(op.count))) {
		pr_debug("can't map: over limit\n");
		gntdev_put_map(NULL, map);
		return err;
	}

	if (copy_from_user(map->grants, &u->refs,
			   sizeof(map->grants[0]) * op.count) != 0) {
		gntdev_put_map(NULL, map);
		return -EFAULT;
	}

	mutex_lock(&priv->lock);
	gntdev_add_map(priv, map);
	op.index = map->index << PAGE_SHIFT;
	mutex_unlock(&priv->lock);

	if (copy_to_user(u, &op, sizeof(op)) != 0)
		return -EFAULT;

	return 0;
}
static long gntdev_ioctl_unmap_grant_ref(struct gntdev_priv *priv,
					 struct ioctl_gntdev_unmap_grant_ref __user *u)
{
	struct ioctl_gntdev_unmap_grant_ref op;
	struct gntdev_grant_map *map;
	int err = -ENOENT;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;
	pr_debug("priv %p, del %d+%d\n", priv, (int)op.index, (int)op.count);

	mutex_lock(&priv->lock);
	map = gntdev_find_map_index(priv, op.index >> PAGE_SHIFT, op.count);
	if (map) {
		list_del(&map->next);
		if (populate_freeable_maps)
			list_add_tail(&map->next, &priv->freeable_maps);
		err = 0;
	}
	mutex_unlock(&priv->lock);
	if (map)
		gntdev_put_map(priv, map);
	return err;
}
static long gntdev_ioctl_get_offset_for_vaddr(struct gntdev_priv *priv,
					      struct ioctl_gntdev_get_offset_for_vaddr __user *u)
{
	struct ioctl_gntdev_get_offset_for_vaddr op;
	struct vm_area_struct *vma;
	struct gntdev_grant_map *map;
	int rv = -EINVAL;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;
	pr_debug("priv %p, offset for vaddr %lx\n", priv, (unsigned long)op.vaddr);

	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, op.vaddr);
	if (!vma || vma->vm_ops != &gntdev_vmops)
		goto out_unlock;

	map = vma->vm_private_data;
	if (!map)
		goto out_unlock;

	op.offset = map->index << PAGE_SHIFT;
	op.count = map->count;
	rv = 0;

 out_unlock:
	up_read(&current->mm->mmap_sem);

	if (rv == 0 && copy_to_user(u, &op, sizeof(op)) != 0)
		return -EFAULT;
	return rv;
}

static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u)
{
	struct ioctl_gntdev_unmap_notify op;
	struct gntdev_grant_map *map;
	int rc;
	int out_flags;
	unsigned int out_event;

	if (copy_from_user(&op, u, sizeof(op)))
		return -EFAULT;

	if (op.action & ~(UNMAP_NOTIFY_CLEAR_BYTE|UNMAP_NOTIFY_SEND_EVENT))
		return -EINVAL;

	/* We need to grab a reference to the event channel we are going to use
	 * to send the notify before releasing the reference we may already have
	 * (if someone has called this ioctl twice). This is required so that
	 * it is possible to change the clear_byte part of the notification
	 * without disturbing the event channel part, which may now be the last
	 * reference to that event channel.
	 */
	if (op.action & UNMAP_NOTIFY_SEND_EVENT) {
		if (evtchn_get(op.event_channel_port))
			return -EINVAL;
	}

	out_flags = op.action;
	out_event = op.event_channel_port;

	mutex_lock(&priv->lock);

	list_for_each_entry(map, &priv->maps, next) {
		uint64_t begin = map->index << PAGE_SHIFT;
		uint64_t end = (map->index + map->count) << PAGE_SHIFT;
		if (op.index >= begin && op.index < end)
			goto found;
	}
	rc = -ENOENT;
	goto unlock_out;

 found:
	if ((op.action & UNMAP_NOTIFY_CLEAR_BYTE) &&
			(map->flags & GNTMAP_readonly)) {
		rc = -EINVAL;
		goto unlock_out;
	}

	out_flags = map->notify.flags;
	out_event = map->notify.event;

	map->notify.flags = op.action;
	map->notify.addr = op.index - (map->index << PAGE_SHIFT);
	map->notify.event = op.event_channel_port;

	rc = 0;

 unlock_out:
	mutex_unlock(&priv->lock);

	/* Drop the reference to the event channel we did not save in the map */
	if (out_flags & UNMAP_NOTIFY_SEND_EVENT)
		evtchn_put(out_event);

	return rc;
}
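
/*
 * Illustrative use of the notify ioctl (a sketch; op.index comes from a
 * prior IOCTL_GNTDEV_MAP_GRANT_REF and evtchn_port is a hypothetical,
 * caller-supplied event channel): ask gntdev to clear the first byte of
 * the mapped page and signal the event channel when the grant is
 * unmapped, e.g. because the process crashed:
 *
 *	struct ioctl_gntdev_unmap_notify notify = {
 *		.index = op.index,	// byte offset within the mapping
 *		.action = UNMAP_NOTIFY_CLEAR_BYTE | UNMAP_NOTIFY_SEND_EVENT,
 *		.event_channel_port = evtchn_port,
 *	};
 *	ioctl(fd, IOCTL_GNTDEV_SET_UNMAP_NOTIFY, &notify);
 */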

#define GNTDEV_COPY_BATCH 16

struct gntdev_copy_batch {
	struct gnttab_copy ops[GNTDEV_COPY_BATCH];
	struct page *pages[GNTDEV_COPY_BATCH];
	s16 __user *status[GNTDEV_COPY_BATCH];
	unsigned int nr_ops;
	unsigned int nr_pages;
};

static int gntdev_get_page(struct gntdev_copy_batch *batch, void __user *virt,
			   bool writeable, unsigned long *gfn)
{
	unsigned long addr = (unsigned long)virt;
	struct page *page;
	unsigned long xen_pfn;
	int ret;

	ret = get_user_pages_fast(addr, 1, writeable ? FOLL_WRITE : 0, &page);
	if (ret < 0)
		return ret;

	batch->pages[batch->nr_pages++] = page;

	xen_pfn = page_to_xen_pfn(page) + XEN_PFN_DOWN(addr & ~PAGE_MASK);
	*gfn = pfn_to_gfn(xen_pfn);

	return 0;
}

static void gntdev_put_pages(struct gntdev_copy_batch *batch)
{
	unsigned int i;

	for (i = 0; i < batch->nr_pages; i++)
		put_page(batch->pages[i]);
	batch->nr_pages = 0;
}
static int gntdev_copy(struct gntdev_copy_batch *batch)
{
	unsigned int i;

	gnttab_batch_copy(batch->ops, batch->nr_ops);
	gntdev_put_pages(batch);

	/*
	 * For each completed op, update the status if the op failed
	 * and all previous ops for the segment were successful.
	 */
	for (i = 0; i < batch->nr_ops; i++) {
		s16 status = batch->ops[i].status;
		s16 old_status;

		if (status == GNTST_okay)
			continue;

		if (__get_user(old_status, batch->status[i]))
			return -EFAULT;

		if (old_status != GNTST_okay)
			continue;

		if (__put_user(status, batch->status[i]))
			return -EFAULT;
	}

	batch->nr_ops = 0;
	return 0;
}
static int gntdev_grant_copy_seg(struct gntdev_copy_batch *batch,
				 struct gntdev_grant_copy_segment *seg,
				 s16 __user *status)
{
	uint16_t copied = 0;

	/*
	 * Disallow local -> local copies since there is only space in
	 * batch->pages for one page per-op and this would be a very
	 * expensive memcpy().
	 */
	if (!(seg->flags & (GNTCOPY_source_gref | GNTCOPY_dest_gref)))
		return -EINVAL;

	/* Can't cross page if source/dest is a grant ref. */
	if (seg->flags & GNTCOPY_source_gref) {
		if (seg->source.foreign.offset + seg->len > XEN_PAGE_SIZE)
			return -EINVAL;
	}
	if (seg->flags & GNTCOPY_dest_gref) {
		if (seg->dest.foreign.offset + seg->len > XEN_PAGE_SIZE)
			return -EINVAL;
	}

	if (put_user(GNTST_okay, status))
		return -EFAULT;

	while (copied < seg->len) {
		struct gnttab_copy *op;
		void __user *virt;
		size_t len, off;
		unsigned long gfn;
		int ret;

		if (batch->nr_ops >= GNTDEV_COPY_BATCH) {
			ret = gntdev_copy(batch);
			if (ret < 0)
				return ret;
		}

		len = seg->len - copied;

		op = &batch->ops[batch->nr_ops];
		op->flags = 0;

		if (seg->flags & GNTCOPY_source_gref) {
			op->source.u.ref = seg->source.foreign.ref;
			op->source.domid = seg->source.foreign.domid;
			op->source.offset = seg->source.foreign.offset + copied;
			op->flags |= GNTCOPY_source_gref;
		} else {
			virt = seg->source.virt + copied;
			off = (unsigned long)virt & ~XEN_PAGE_MASK;
			len = min(len, (size_t)XEN_PAGE_SIZE - off);

			ret = gntdev_get_page(batch, virt, false, &gfn);
			if (ret < 0)
				return ret;

			op->source.u.gmfn = gfn;
			op->source.domid = DOMID_SELF;
			op->source.offset = off;
		}

		if (seg->flags & GNTCOPY_dest_gref) {
			op->dest.u.ref = seg->dest.foreign.ref;
			op->dest.domid = seg->dest.foreign.domid;
			op->dest.offset = seg->dest.foreign.offset + copied;
			op->flags |= GNTCOPY_dest_gref;
		} else {
			virt = seg->dest.virt + copied;
			off = (unsigned long)virt & ~XEN_PAGE_MASK;
			len = min(len, (size_t)XEN_PAGE_SIZE - off);

			ret = gntdev_get_page(batch, virt, true, &gfn);
			if (ret < 0)
				return ret;

			op->dest.u.gmfn = gfn;
			op->dest.domid = DOMID_SELF;
			op->dest.offset = off;
		}

		op->len = len;
		copied += len;

		batch->status[batch->nr_ops] = status;
		batch->nr_ops++;
	}

	return 0;
}
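
/*
 * Worked example for the splitting loop above (hypothetical values): a
 * 3000-byte copy from a local buffer at page offset 0xe00 into a foreign
 * grant at offset 0 stays within one Xen page on the grant side but
 * crosses a page boundary locally, so it is emitted as two ops of
 * min(3000, 4096 - 0xe00) = 512 bytes and the remaining 2488 bytes.
 */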
static long gntdev_ioctl_grant_copy(struct gntdev_priv *priv, void __user *u)
{
	struct ioctl_gntdev_grant_copy copy;
	struct gntdev_copy_batch batch;
	unsigned int i;
	int ret = 0;

	if (copy_from_user(&copy, u, sizeof(copy)))
		return -EFAULT;

	batch.nr_ops = 0;
	batch.nr_pages = 0;

	for (i = 0; i < copy.count; i++) {
		struct gntdev_grant_copy_segment seg;

		if (copy_from_user(&seg, &copy.segments[i], sizeof(seg))) {
			ret = -EFAULT;
			goto out;
		}

		ret = gntdev_grant_copy_seg(&batch, &seg, &copy.segments[i].status);
		if (ret < 0)
			goto out;

		cond_resched();
	}
	if (batch.nr_ops)
		ret = gntdev_copy(&batch);
	return ret;

  out:
	gntdev_put_pages(&batch);
	return ret;
}
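
/*
 * Illustrative use of the copy ioctl (a sketch, assuming the UAPI from
 * include/uapi/xen/gntdev.h; gref and remote_domid are hypothetical,
 * caller-supplied values): copy one local page into a foreign grant
 * without mapping it first:
 *
 *	struct gntdev_grant_copy_segment seg = {
 *		.source.virt = buf,
 *		.dest.foreign = { .ref = gref, .domid = remote_domid },
 *		.len = 4096,
 *		.flags = GNTCOPY_dest_gref,
 *	};
 *	struct ioctl_gntdev_grant_copy copy = { .count = 1, .segments = &seg };
 *	ioctl(fd, IOCTL_GNTDEV_GRANT_COPY, &copy);
 *	// the per-segment result is written back to seg.status
 *	// (GNTST_okay on success)
 */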

static long gntdev_ioctl(struct file *flip,
			 unsigned int cmd, unsigned long arg)
{
	struct gntdev_priv *priv = flip->private_data;
	void __user *ptr = (void __user *)arg;

	switch (cmd) {
	case IOCTL_GNTDEV_MAP_GRANT_REF:
		return gntdev_ioctl_map_grant_ref(priv, ptr);

	case IOCTL_GNTDEV_UNMAP_GRANT_REF:
		return gntdev_ioctl_unmap_grant_ref(priv, ptr);

	case IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR:
		return gntdev_ioctl_get_offset_for_vaddr(priv, ptr);

	case IOCTL_GNTDEV_SET_UNMAP_NOTIFY:
		return gntdev_ioctl_notify(priv, ptr);

	case IOCTL_GNTDEV_GRANT_COPY:
		return gntdev_ioctl_grant_copy(priv, ptr);

#ifdef CONFIG_XEN_GNTDEV_DMABUF
	case IOCTL_GNTDEV_DMABUF_EXP_FROM_REFS:
		return gntdev_ioctl_dmabuf_exp_from_refs(priv, use_ptemod, ptr);

	case IOCTL_GNTDEV_DMABUF_EXP_WAIT_RELEASED:
		return gntdev_ioctl_dmabuf_exp_wait_released(priv, ptr);

	case IOCTL_GNTDEV_DMABUF_IMP_TO_REFS:
		return gntdev_ioctl_dmabuf_imp_to_refs(priv, ptr);

	case IOCTL_GNTDEV_DMABUF_IMP_RELEASE:
		return gntdev_ioctl_dmabuf_imp_release(priv, ptr);
#endif

	default:
		pr_debug("priv %p, unknown cmd %x\n", priv, cmd);
		return -ENOIOCTLCMD;
	}

	return 0;
}
static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
{
	struct gntdev_priv *priv = flip->private_data;
	int index = vma->vm_pgoff;
	int count = vma_pages(vma);
	struct gntdev_grant_map *map;
	int err = -EINVAL;

	if ((vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	pr_debug("map %d+%d at %lx (pgoff %lx)\n",
			index, count, vma->vm_start, vma->vm_pgoff);

	mutex_lock(&priv->lock);
	map = gntdev_find_map_index(priv, index, count);
	if (!map)
		goto unlock_out;
	if (use_ptemod && map->vma)
		goto unlock_out;
	if (use_ptemod && priv->mm != vma->vm_mm) {
		pr_warn("Huh? Other mm?\n");
		goto unlock_out;
	}

	refcount_inc(&map->users);

	vma->vm_ops = &gntdev_vmops;

	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP | VM_MIXEDMAP;

	if (use_ptemod)
		vma->vm_flags |= VM_DONTCOPY;

	vma->vm_private_data = map;

	if (use_ptemod)
		map->vma = vma;

	if (map->flags) {
		if ((vma->vm_flags & VM_WRITE) &&
				(map->flags & GNTMAP_readonly))
			goto out_unlock_put;
	} else {
		map->flags = GNTMAP_host_map;
		if (!(vma->vm_flags & VM_WRITE))
			map->flags |= GNTMAP_readonly;
	}

	mutex_unlock(&priv->lock);

	if (use_ptemod) {
		map->pages_vm_start = vma->vm_start;
		err = apply_to_page_range(vma->vm_mm, vma->vm_start,
					  vma->vm_end - vma->vm_start,
					  find_grant_ptes, map);
		if (err) {
			pr_warn("find_grant_ptes() failure.\n");
			goto out_put_map;
		}
	}

	err = gntdev_map_grant_pages(map);
	if (err)
		goto out_put_map;

	if (!use_ptemod) {
		err = vm_map_pages(vma, map->pages, map->count);
		if (err)
			goto out_put_map;
	} else {
#ifdef CONFIG_X86
		/*
		 * If the PTEs were not made special by the grant map
		 * hypercall, do so here.
		 *
		 * This is racy since the mapping is already visible
		 * to userspace but userspace should be well-behaved
		 * enough to not touch it until the mmap() call returns.
		 */
		if (!xen_feature(XENFEAT_gnttab_map_avail_bits)) {
			apply_to_page_range(vma->vm_mm, vma->vm_start,
					    vma->vm_end - vma->vm_start,
					    set_grant_ptes_as_special, NULL);
		}
#endif
	}

	return 0;

unlock_out:
	mutex_unlock(&priv->lock);
	return err;

out_unlock_put:
	mutex_unlock(&priv->lock);
out_put_map:
	if (use_ptemod) {
		map->vma = NULL;
		unmap_grant_pages(map, 0, map->count);
	}
	gntdev_put_map(priv, map);
	return err;
}
static const struct file_operations gntdev_fops = {
	.owner = THIS_MODULE,
	.open = gntdev_open,
	.release = gntdev_release,
	.mmap = gntdev_mmap,
	.unlocked_ioctl = gntdev_ioctl
};

static struct miscdevice gntdev_miscdev = {
	.minor        = MISC_DYNAMIC_MINOR,
	.name         = "xen/gntdev",
	.fops         = &gntdev_fops,
};

/* ------------------------------------------------------------------ */

static int __init gntdev_init(void)
{
	int err;

	if (!xen_domain())
		return -ENODEV;

	use_ptemod = !xen_feature(XENFEAT_auto_translated_physmap);

	err = misc_register(&gntdev_miscdev);
	if (err != 0) {
		pr_err("Could not register gntdev device\n");
		return err;
	}
	return 0;
}

static void __exit gntdev_exit(void)
{
	misc_deregister(&gntdev_miscdev);
}

module_init(gntdev_init);
module_exit(gntdev_exit);

/* ------------------------------------------------------------------ */