/******************************************************************************
 * gntdev.c
 *
 * Device for accessing (in user-space) pages that have been granted by other
 * domains.
 *
 * Copyright (c) 2006-2007, D G Murray.
 *           (c) 2009 Gerd Hoffmann <kraxel@redhat.com>
 *           (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#undef DEBUG

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/refcount.h>

#include <xen/xen.h>
#include <xen/grant_table.h>
#include <xen/balloon.h>
#include <xen/gntdev.h>
#include <xen/events.h>
#include <xen/page.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include "gntdev-common.h"
#ifdef CONFIG_XEN_GNTDEV_DMABUF
#include "gntdev-dmabuf.h"
#endif

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Derek G. Murray <Derek.Murray@cl.cam.ac.uk>, "
	      "Gerd Hoffmann <kraxel@redhat.com>");
MODULE_DESCRIPTION("User-space granted page access driver");

static unsigned int limit = 64*1024;
module_param(limit, uint, 0644);
MODULE_PARM_DESC(limit,
	"Maximum number of grants that may be mapped by one mapping request");

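/*
 * Example (illustrative, not from the driver itself): the limit can be
 * set at load time, e.g. "modprobe xen-gntdev limit=131072"; the 0644
 * mode above makes it writable at run time as well, likely via
 * /sys/module/xen_gntdev/parameters/limit.
 */
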
static int use_ptemod;

static int unmap_grant_pages(struct gntdev_grant_map *map,
			     int offset, int pages);

static struct miscdevice gntdev_miscdev;

/* ------------------------------------------------------------------ */
bool gntdev_test_page_count(unsigned int count)
{
	return !count || count > limit;
}

static void gntdev_print_maps(struct gntdev_priv *priv,
			      char *text, int text_index)
{
#ifdef DEBUG
	struct gntdev_grant_map *map;

	pr_debug("%s: maps list (priv %p)\n", __func__, priv);
	list_for_each_entry(map, &priv->maps, next)
		pr_debug("  index %2d, count %2d %s\n",
		       map->index, map->count,
		       map->index == text_index && text ? text : "");
#endif
}

static void gntdev_free_map(struct gntdev_grant_map *map)
{
	if (map == NULL)
		return;

#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
	if (map->dma_vaddr) {
		struct gnttab_dma_alloc_args args;

		args.dev = map->dma_dev;
		args.coherent = !!(map->dma_flags & GNTDEV_DMA_FLAG_COHERENT);
		args.nr_pages = map->count;
		args.pages = map->pages;
		args.frames = map->frames;
		args.vaddr = map->dma_vaddr;
		args.dev_bus_addr = map->dma_bus_addr;

		gnttab_dma_free_pages(&args);
	} else
#endif
	if (map->pages)
		gnttab_free_pages(map->count, map->pages);

#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
	kvfree(map->frames);
#endif
	kvfree(map->pages);
	kvfree(map->grants);
	kvfree(map->map_ops);
	kvfree(map->unmap_ops);
	kvfree(map->kmap_ops);
	kvfree(map->kunmap_ops);
	kfree(map);
}

struct gntdev_grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count,
					  int dma_flags)
{
	struct gntdev_grant_map *add;
	int i;

	add = kzalloc(sizeof(*add), GFP_KERNEL);
	if (NULL == add)
		return NULL;

	add->grants    = kvcalloc(count, sizeof(add->grants[0]), GFP_KERNEL);
	add->map_ops   = kvcalloc(count, sizeof(add->map_ops[0]), GFP_KERNEL);
	add->unmap_ops = kvcalloc(count, sizeof(add->unmap_ops[0]), GFP_KERNEL);
	add->kmap_ops  = kvcalloc(count, sizeof(add->kmap_ops[0]), GFP_KERNEL);
	add->kunmap_ops = kvcalloc(count,
				   sizeof(add->kunmap_ops[0]), GFP_KERNEL);
	add->pages     = kvcalloc(count, sizeof(add->pages[0]), GFP_KERNEL);
	if (NULL == add->grants    ||
	    NULL == add->map_ops   ||
	    NULL == add->unmap_ops ||
	    NULL == add->kmap_ops  ||
	    NULL == add->kunmap_ops ||
	    NULL == add->pages)
		goto err;

#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
	add->dma_flags = dma_flags;

	/*
	 * Check if this mapping is requested to be backed
	 * by a DMA buffer.
	 */
	if (dma_flags & (GNTDEV_DMA_FLAG_WC | GNTDEV_DMA_FLAG_COHERENT)) {
		struct gnttab_dma_alloc_args args;

		add->frames = kvcalloc(count, sizeof(add->frames[0]),
				       GFP_KERNEL);
		if (!add->frames)
			goto err;

		/* Remember the device, so we can free DMA memory. */
		add->dma_dev = priv->dma_dev;

		args.dev = priv->dma_dev;
		args.coherent = !!(dma_flags & GNTDEV_DMA_FLAG_COHERENT);
		args.nr_pages = count;
		args.pages = add->pages;
		args.frames = add->frames;
		if (gnttab_dma_alloc_pages(&args))
			goto err;

		add->dma_vaddr = args.vaddr;
		add->dma_bus_addr = args.dev_bus_addr;
	} else
#endif
	if (gnttab_alloc_pages(count, add->pages))
		goto err;

	for (i = 0; i < count; i++) {
		add->map_ops[i].handle = -1;
		add->unmap_ops[i].handle = -1;
		add->kmap_ops[i].handle = -1;
		add->kunmap_ops[i].handle = -1;
	}

	add->index = 0;
	add->count = count;
	refcount_set(&add->users, 1);

	return add;

err:
	gntdev_free_map(add);
	return NULL;
}

void gntdev_add_map(struct gntdev_priv *priv, struct gntdev_grant_map *add)
{
	struct gntdev_grant_map *map;

	list_for_each_entry(map, &priv->maps, next) {
		if (add->index + add->count < map->index) {
			list_add_tail(&add->next, &map->next);
			goto done;
		}
		add->index = map->index + map->count;
	}
	list_add_tail(&add->next, &priv->maps);

done:
	gntdev_print_maps(priv, "[new]", add->index);
}

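/*
 * For example (illustrative): with maps already at indices 0+2 and 5+1,
 * adding a two-page map walks past 0+2 (candidate index 2), finds that
 * 2+2 < 5, and links the new map in at index 2, keeping the list sorted.
 */
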
static struct gntdev_grant_map *gntdev_find_map_index(struct gntdev_priv *priv,
						      int index, int count)
{
	struct gntdev_grant_map *map;

	list_for_each_entry(map, &priv->maps, next) {
		if (map->index != index)
			continue;
		if (count && map->count != count)
			continue;
		return map;
	}
	return NULL;
}

void gntdev_put_map(struct gntdev_priv *priv, struct gntdev_grant_map *map)
{
	if (!map)
		return;

	if (!refcount_dec_and_test(&map->users))
		return;

	if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT) {
		notify_remote_via_evtchn(map->notify.event);
		evtchn_put(map->notify.event);
	}

	if (map->pages && !use_ptemod)
		unmap_grant_pages(map, 0, map->count);
	gntdev_free_map(map);
}

/* ------------------------------------------------------------------ */
static int find_grant_ptes(pte_t *pte, unsigned long addr, void *data)
{
	struct gntdev_grant_map *map = data;
	unsigned int pgnr = (addr - map->vma->vm_start) >> PAGE_SHIFT;
	int flags = map->flags | GNTMAP_application_map | GNTMAP_contains_pte;
	u64 pte_maddr;

	BUG_ON(pgnr >= map->count);
	pte_maddr = arbitrary_virt_to_machine(pte).maddr;

	/*
	 * Set the PTE as special to force get_user_pages_fast() fall
	 * back to the slow path.  If this is not supported as part of
	 * the grant map, it will be done afterwards.
	 */
	if (xen_feature(XENFEAT_gnttab_map_avail_bits))
		flags |= (1 << _GNTMAP_guest_avail0);

	gnttab_set_map_op(&map->map_ops[pgnr], pte_maddr, flags,
			  map->grants[pgnr].ref,
			  map->grants[pgnr].domid);
	gnttab_set_unmap_op(&map->unmap_ops[pgnr], pte_maddr, flags,
			    -1 /* handle */);
	return 0;
}

#ifdef CONFIG_X86
static int set_grant_ptes_as_special(pte_t *pte, unsigned long addr, void *data)
{
	set_pte_at(current->mm, addr, pte, pte_mkspecial(*pte));
	return 0;
}
#endif

int gntdev_map_grant_pages(struct gntdev_grant_map *map)
{
	int i, err = 0;

	if (!use_ptemod) {
		/* Note: it could already be mapped */
		if (map->map_ops[0].handle != -1)
			return 0;
		for (i = 0; i < map->count; i++) {
			unsigned long addr = (unsigned long)
				pfn_to_kaddr(page_to_pfn(map->pages[i]));
			gnttab_set_map_op(&map->map_ops[i], addr, map->flags,
				map->grants[i].ref,
				map->grants[i].domid);
			gnttab_set_unmap_op(&map->unmap_ops[i], addr,
				map->flags, -1 /* handle */);
		}
	} else {
		/*
		 * Setup the map_ops corresponding to the pte entries pointing
		 * to the kernel linear addresses of the struct pages.
		 * These ptes are completely different from the user ptes dealt
		 * with find_grant_ptes.
		 * Note that GNTMAP_device_map isn't needed here: The
		 * dev_bus_addr output field gets consumed only from ->map_ops,
		 * and by not requesting it when mapping we also avoid needing
		 * to mirror dev_bus_addr into ->unmap_ops (and holding an extra
		 * reference to the page in the hypervisor).
		 */
		unsigned int flags = (map->flags & ~GNTMAP_device_map) |
				     GNTMAP_host_map;

		for (i = 0; i < map->count; i++) {
			unsigned long address = (unsigned long)
				pfn_to_kaddr(page_to_pfn(map->pages[i]));
			BUG_ON(PageHighMem(map->pages[i]));

			gnttab_set_map_op(&map->kmap_ops[i], address, flags,
				map->grants[i].ref,
				map->grants[i].domid);
			gnttab_set_unmap_op(&map->kunmap_ops[i], address,
				flags, -1);
		}
	}

	pr_debug("map %d+%d\n", map->index, map->count);
	err = gnttab_map_refs(map->map_ops, use_ptemod ? map->kmap_ops : NULL,
			map->pages, map->count);
	if (err)
		return err;

	for (i = 0; i < map->count; i++) {
		if (map->map_ops[i].status) {
			err = -EINVAL;
			continue;
		}

		if (map->flags & GNTMAP_device_map)
			map->unmap_ops[i].dev_bus_addr = map->map_ops[i].dev_bus_addr;

		map->unmap_ops[i].handle = map->map_ops[i].handle;
		if (use_ptemod)
			map->kunmap_ops[i].handle = map->kmap_ops[i].handle;
	}
	return err;
}

static int __unmap_grant_pages(struct gntdev_grant_map *map, int offset,
			       int pages)
{
	int i, err = 0;
	struct gntab_unmap_queue_data unmap_data;

	if (map->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) {
		int pgno = (map->notify.addr >> PAGE_SHIFT);

		if (pgno >= offset && pgno < offset + pages) {
			/* No need for kmap, pages are in lowmem */
			uint8_t *tmp = pfn_to_kaddr(page_to_pfn(map->pages[pgno]));

			tmp[map->notify.addr & (PAGE_SIZE-1)] = 0;
			map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE;
		}
	}

	unmap_data.unmap_ops = map->unmap_ops + offset;
	unmap_data.kunmap_ops = use_ptemod ? map->kunmap_ops + offset : NULL;
	unmap_data.pages = map->pages + offset;
	unmap_data.count = pages;

	err = gnttab_unmap_refs_sync(&unmap_data);
	if (err)
		return err;

	for (i = 0; i < pages; i++) {
		if (map->unmap_ops[offset+i].status)
			err = -EINVAL;
		pr_debug("unmap handle=%d st=%d\n",
			map->unmap_ops[offset+i].handle,
			map->unmap_ops[offset+i].status);
		map->unmap_ops[offset+i].handle = -1;
	}
	return err;
}

static int unmap_grant_pages(struct gntdev_grant_map *map, int offset,
			     int pages)
{
	int range, err = 0;

	pr_debug("unmap %d+%d [%d+%d]\n", map->index, map->count, offset, pages);

	/* It is possible the requested range will have a "hole" where we
	 * already unmapped some of the grants. Only unmap valid ranges.
	 */
	while (pages && !err) {
		while (pages && map->unmap_ops[offset].handle == -1) {
			offset++;
			pages--;
		}
		range = 0;
		while (range < pages) {
			if (map->unmap_ops[offset+range].handle == -1)
				break;
			range++;
		}
		err = __unmap_grant_pages(map, offset, range);
		offset += range;
		pages -= range;
	}

	return err;
}

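/*
 * For example (illustrative): unmapping 0+6 when handles 2 and 3 were
 * already invalidated (-1) issues two batches,
 * __unmap_grant_pages(map, 0, 2) and __unmap_grant_pages(map, 4, 2).
 */
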
/* ------------------------------------------------------------------ */
static void gntdev_vma_open(struct vm_area_struct *vma)
{
	struct gntdev_grant_map *map = vma->vm_private_data;

	pr_debug("gntdev_vma_open %p\n", vma);
	refcount_inc(&map->users);
}

static void gntdev_vma_close(struct vm_area_struct *vma)
{
	struct gntdev_grant_map *map = vma->vm_private_data;
	struct file *file = vma->vm_file;
	struct gntdev_priv *priv = file->private_data;

	pr_debug("gntdev_vma_close %p\n", vma);

	if (use_ptemod) {
		WARN_ON(map->vma != vma);
		mmu_interval_notifier_remove(&map->notifier);
		map->vma = NULL;
	}
	vma->vm_private_data = NULL;
	gntdev_put_map(priv, map);
}

static struct page *gntdev_vma_find_special_page(struct vm_area_struct *vma,
						 unsigned long addr)
{
	struct gntdev_grant_map *map = vma->vm_private_data;

	return map->pages[(addr - map->pages_vm_start) >> PAGE_SHIFT];
}

static const struct vm_operations_struct gntdev_vmops = {
	.open = gntdev_vma_open,
	.close = gntdev_vma_close,
	.find_special_page = gntdev_vma_find_special_page,
};

/* ------------------------------------------------------------------ */
static bool gntdev_invalidate(struct mmu_interval_notifier *mn,
			      const struct mmu_notifier_range *range,
			      unsigned long cur_seq)
{
	struct gntdev_grant_map *map =
		container_of(mn, struct gntdev_grant_map, notifier);
	unsigned long mstart, mend;
	int err;

	if (!mmu_notifier_range_blockable(range))
		return false;

	/*
	 * If the VMA is split or otherwise changed the notifier is not
	 * updated, but we don't want to process VA's outside the modified
	 * VMA. FIXME: It would be much more understandable to just prevent
	 * modifying the VMA in the first place.
	 */
	if (map->vma->vm_start >= range->end ||
	    map->vma->vm_end <= range->start)
		return true;

	mstart = max(range->start, map->vma->vm_start);
	mend = min(range->end, map->vma->vm_end);
	pr_debug("map %d+%d (%lx %lx), range %lx %lx, mrange %lx %lx\n",
			map->index, map->count,
			map->vma->vm_start, map->vma->vm_end,
			range->start, range->end, mstart, mend);
	err = unmap_grant_pages(map,
				(mstart - map->vma->vm_start) >> PAGE_SHIFT,
				(mend - mstart) >> PAGE_SHIFT);
	WARN_ON(err);

	return true;
}

static const struct mmu_interval_notifier_ops gntdev_mmu_ops = {
	.invalidate = gntdev_invalidate,
};

/* ------------------------------------------------------------------ */
static int gntdev_open(struct inode *inode, struct file *flip)
{
	struct gntdev_priv *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	INIT_LIST_HEAD(&priv->maps);
	mutex_init(&priv->lock);

#ifdef CONFIG_XEN_GNTDEV_DMABUF
	priv->dmabuf_priv = gntdev_dmabuf_init(flip);
	if (IS_ERR(priv->dmabuf_priv)) {
		int ret = PTR_ERR(priv->dmabuf_priv);

		kfree(priv);
		return ret;
	}
#endif

	flip->private_data = priv;
#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
	priv->dma_dev = gntdev_miscdev.this_device;
	dma_coerce_mask_and_coherent(priv->dma_dev, DMA_BIT_MASK(64));
#endif
	pr_debug("priv %p\n", priv);

	return 0;
}

static int gntdev_release(struct inode *inode, struct file *flip)
{
	struct gntdev_priv *priv = flip->private_data;
	struct gntdev_grant_map *map;

	pr_debug("priv %p\n", priv);

	mutex_lock(&priv->lock);
	while (!list_empty(&priv->maps)) {
		map = list_entry(priv->maps.next,
				 struct gntdev_grant_map, next);
		list_del(&map->next);
		gntdev_put_map(NULL /* already removed */, map);
	}
	mutex_unlock(&priv->lock);

#ifdef CONFIG_XEN_GNTDEV_DMABUF
	gntdev_dmabuf_fini(priv->dmabuf_priv);
#endif

	kfree(priv);
	return 0;
}

static long gntdev_ioctl_map_grant_ref(struct gntdev_priv *priv,
				       struct ioctl_gntdev_map_grant_ref __user *u)
{
	struct ioctl_gntdev_map_grant_ref op;
	struct gntdev_grant_map *map;
	int err;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;
	pr_debug("priv %p, add %d\n", priv, op.count);
	if (unlikely(gntdev_test_page_count(op.count)))
		return -EINVAL;

	err = -ENOMEM;
	map = gntdev_alloc_map(priv, op.count, 0 /* This is not a dma-buf. */);
	if (!map)
		return err;

	if (copy_from_user(map->grants, &u->refs,
			   sizeof(map->grants[0]) * op.count) != 0) {
		gntdev_put_map(NULL, map);
		return -EFAULT;
	}

	mutex_lock(&priv->lock);
	gntdev_add_map(priv, map);
	op.index = map->index << PAGE_SHIFT;
	mutex_unlock(&priv->lock);

	if (copy_to_user(u, &op, sizeof(op)) != 0)
		return -EFAULT;

	return 0;
}

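/*
 * A minimal user-space sketch of the map path (illustrative only; fd,
 * remote_domid and gref are placeholders, the structs and ioctl numbers
 * come from the <xen/gntdev.h> uapi header):
 *
 *	struct ioctl_gntdev_map_grant_ref op;
 *	void *addr;
 *
 *	op.count = 1;
 *	op.refs[0].domid = remote_domid;  // peer domain id (placeholder)
 *	op.refs[0].ref = gref;            // grant ref from the peer (placeholder)
 *	if (ioctl(fd, IOCTL_GNTDEV_MAP_GRANT_REF, &op) == 0)
 *		// op.index is the offset to hand to mmap()
 *		addr = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *			    MAP_SHARED, fd, op.index);
 */
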
static long gntdev_ioctl_unmap_grant_ref(struct gntdev_priv *priv,
					 struct ioctl_gntdev_unmap_grant_ref __user *u)
{
	struct ioctl_gntdev_unmap_grant_ref op;
	struct gntdev_grant_map *map;
	int err = -ENOENT;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;
	pr_debug("priv %p, del %d+%d\n", priv, (int)op.index, (int)op.count);

	mutex_lock(&priv->lock);
	map = gntdev_find_map_index(priv, op.index >> PAGE_SHIFT, op.count);
	if (map) {
		list_del(&map->next);
		err = 0;
	}
	mutex_unlock(&priv->lock);
	if (map)
		gntdev_put_map(priv, map);
	return err;
}

static long gntdev_ioctl_get_offset_for_vaddr(struct gntdev_priv *priv,
					      struct ioctl_gntdev_get_offset_for_vaddr __user *u)
{
	struct ioctl_gntdev_get_offset_for_vaddr op;
	struct vm_area_struct *vma;
	struct gntdev_grant_map *map;
	int rv = -EINVAL;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;
	pr_debug("priv %p, offset for vaddr %lx\n", priv, (unsigned long)op.vaddr);

	mmap_read_lock(current->mm);
	vma = find_vma(current->mm, op.vaddr);
	if (!vma || vma->vm_ops != &gntdev_vmops)
		goto out_unlock;

	map = vma->vm_private_data;
	if (!map)
		goto out_unlock;

	op.offset = map->index << PAGE_SHIFT;
	op.count = map->count;
	rv = 0;

 out_unlock:
	mmap_read_unlock(current->mm);

	if (rv == 0 && copy_to_user(u, &op, sizeof(op)) != 0)
		return -EFAULT;
	return rv;
}

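/*
 * A user-space sketch pairing this lookup with unmap (illustrative
 * only; "addr" is a pointer previously returned by mmap() on this fd):
 *
 *	struct ioctl_gntdev_get_offset_for_vaddr gov = {
 *		.vaddr = (uint64_t)(unsigned long)addr,
 *	};
 *	struct ioctl_gntdev_unmap_grant_ref uop;
 *
 *	ioctl(fd, IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR, &gov);
 *	munmap(addr, gov.count * 4096);   // assuming 4 KiB pages
 *	uop.index = gov.offset;
 *	uop.count = gov.count;
 *	ioctl(fd, IOCTL_GNTDEV_UNMAP_GRANT_REF, &uop);
 */
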
static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u)
{
	struct ioctl_gntdev_unmap_notify op;
	struct gntdev_grant_map *map;
	int rc;
	int out_flags;
	evtchn_port_t out_event;

	if (copy_from_user(&op, u, sizeof(op)))
		return -EFAULT;

	if (op.action & ~(UNMAP_NOTIFY_CLEAR_BYTE|UNMAP_NOTIFY_SEND_EVENT))
		return -EINVAL;

	/* We need to grab a reference to the event channel we are going to use
	 * to send the notify before releasing the reference we may already have
	 * (if someone has called this ioctl twice). This is required so that
	 * it is possible to change the clear_byte part of the notification
	 * without disturbing the event channel part, which may now be the last
	 * reference to that event channel.
	 */
	if (op.action & UNMAP_NOTIFY_SEND_EVENT) {
		if (evtchn_get(op.event_channel_port))
			return -EINVAL;
	}

	out_flags = op.action;
	out_event = op.event_channel_port;

	mutex_lock(&priv->lock);

	list_for_each_entry(map, &priv->maps, next) {
		uint64_t begin = map->index << PAGE_SHIFT;
		uint64_t end = (map->index + map->count) << PAGE_SHIFT;
		if (op.index >= begin && op.index < end)
			goto found;
	}
	rc = -ENOENT;
	goto unlock_out;

 found:
	if ((op.action & UNMAP_NOTIFY_CLEAR_BYTE) &&
			(map->flags & GNTMAP_readonly)) {
		rc = -EINVAL;
		goto unlock_out;
	}

	out_flags = map->notify.flags;
	out_event = map->notify.event;

	map->notify.flags = op.action;
	map->notify.addr = op.index - (map->index << PAGE_SHIFT);
	map->notify.event = op.event_channel_port;

	rc = 0;

 unlock_out:
	mutex_unlock(&priv->lock);

	/* Drop the reference to the event channel we did not save in the map */
	if (out_flags & UNMAP_NOTIFY_SEND_EVENT)
		evtchn_put(out_event);

	return rc;
}

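/*
 * A user-space sketch (illustrative only; "port" is a placeholder and
 * "map_index" is the index returned by IOCTL_GNTDEV_MAP_GRANT_REF).
 * op.index is a byte offset into the mapped range, so the clear-byte
 * can sit anywhere within the mapping:
 *
 *	struct ioctl_gntdev_unmap_notify notify = {
 *		.index = map_index + byte_offset,
 *		.action = UNMAP_NOTIFY_CLEAR_BYTE | UNMAP_NOTIFY_SEND_EVENT,
 *		.event_channel_port = port,
 *	};
 *
 *	ioctl(fd, IOCTL_GNTDEV_SET_UNMAP_NOTIFY, &notify);
 */
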
#define GNTDEV_COPY_BATCH 16

struct gntdev_copy_batch {
	struct gnttab_copy ops[GNTDEV_COPY_BATCH];
	struct page *pages[GNTDEV_COPY_BATCH];
	s16 __user *status[GNTDEV_COPY_BATCH];
	unsigned int nr_ops;
	unsigned int nr_pages;
	bool writeable;
};

static int gntdev_get_page(struct gntdev_copy_batch *batch, void __user *virt,
			   unsigned long *gfn)
{
	unsigned long addr = (unsigned long)virt;
	struct page *page;
	unsigned long xen_pfn;
	int ret;

	ret = pin_user_pages_fast(addr, 1, batch->writeable ? FOLL_WRITE : 0, &page);
	if (ret < 0)
		return ret;

	batch->pages[batch->nr_pages++] = page;

	xen_pfn = page_to_xen_pfn(page) + XEN_PFN_DOWN(addr & ~PAGE_MASK);
	*gfn = pfn_to_gfn(xen_pfn);

	return 0;
}

static void gntdev_put_pages(struct gntdev_copy_batch *batch)
{
	unpin_user_pages_dirty_lock(batch->pages, batch->nr_pages, batch->writeable);
	batch->nr_pages = 0;
	batch->writeable = false;
}

static int gntdev_copy(struct gntdev_copy_batch *batch)
{
	unsigned int i;

	gnttab_batch_copy(batch->ops, batch->nr_ops);
	gntdev_put_pages(batch);

	/*
	 * For each completed op, update the status if the op failed
	 * and all previous ops for the segment were successful.
	 */
	for (i = 0; i < batch->nr_ops; i++) {
		s16 status = batch->ops[i].status;
		s16 old_status;

		if (status == GNTST_okay)
			continue;

		if (__get_user(old_status, batch->status[i]))
			return -EFAULT;

		if (old_status != GNTST_okay)
			continue;

		if (__put_user(status, batch->status[i]))
			return -EFAULT;
	}

	batch->nr_ops = 0;
	return 0;
}

static int gntdev_grant_copy_seg(struct gntdev_copy_batch *batch,
				 struct gntdev_grant_copy_segment *seg,
				 s16 __user *status)
{
	uint16_t copied = 0;

	/*
	 * Disallow local -> local copies since there is only space in
	 * batch->pages for one page per-op and this would be a very
	 * expensive memcpy().
	 */
	if (!(seg->flags & (GNTCOPY_source_gref | GNTCOPY_dest_gref)))
		return -EINVAL;

	/* Can't cross page if source/dest is a grant ref. */
	if (seg->flags & GNTCOPY_source_gref) {
		if (seg->source.foreign.offset + seg->len > XEN_PAGE_SIZE)
			return -EINVAL;
	}
	if (seg->flags & GNTCOPY_dest_gref) {
		if (seg->dest.foreign.offset + seg->len > XEN_PAGE_SIZE)
			return -EINVAL;
	}

	if (put_user(GNTST_okay, status))
		return -EFAULT;

	while (copied < seg->len) {
		struct gnttab_copy *op;
		void __user *virt;
		size_t len, off;
		unsigned long gfn;
		int ret;

		if (batch->nr_ops >= GNTDEV_COPY_BATCH) {
			ret = gntdev_copy(batch);
			if (ret < 0)
				return ret;
		}

		len = seg->len - copied;

		op = &batch->ops[batch->nr_ops];
		op->flags = 0;

		if (seg->flags & GNTCOPY_source_gref) {
			op->source.u.ref = seg->source.foreign.ref;
			op->source.domid = seg->source.foreign.domid;
			op->source.offset = seg->source.foreign.offset + copied;
			op->flags |= GNTCOPY_source_gref;
		} else {
			virt = seg->source.virt + copied;
			off = (unsigned long)virt & ~XEN_PAGE_MASK;
			len = min(len, (size_t)XEN_PAGE_SIZE - off);
			batch->writeable = false;

			ret = gntdev_get_page(batch, virt, &gfn);
			if (ret < 0)
				return ret;

			op->source.u.gmfn = gfn;
			op->source.domid = DOMID_SELF;
			op->source.offset = off;
		}

		if (seg->flags & GNTCOPY_dest_gref) {
			op->dest.u.ref = seg->dest.foreign.ref;
			op->dest.domid = seg->dest.foreign.domid;
			op->dest.offset = seg->dest.foreign.offset + copied;
			op->flags |= GNTCOPY_dest_gref;
		} else {
			virt = seg->dest.virt + copied;
			off = (unsigned long)virt & ~XEN_PAGE_MASK;
			len = min(len, (size_t)XEN_PAGE_SIZE - off);
			batch->writeable = true;

			ret = gntdev_get_page(batch, virt, &gfn);
			if (ret < 0)
				return ret;

			op->dest.u.gmfn = gfn;
			op->dest.domid = DOMID_SELF;
			op->dest.offset = off;
		}

		op->len = len;
		copied += len;

		batch->status[batch->nr_ops] = status;
		batch->nr_ops++;
	}

	return 0;
}

static long gntdev_ioctl_grant_copy(struct gntdev_priv *priv, void __user *u)
{
	struct ioctl_gntdev_grant_copy copy;
	struct gntdev_copy_batch batch;
	unsigned int i;
	int ret = 0;

	if (copy_from_user(&copy, u, sizeof(copy)))
		return -EFAULT;

	batch.nr_ops = 0;
	batch.nr_pages = 0;

	for (i = 0; i < copy.count; i++) {
		struct gntdev_grant_copy_segment seg;

		if (copy_from_user(&seg, &copy.segments[i], sizeof(seg))) {
			ret = -EFAULT;
			goto out;
		}

		ret = gntdev_grant_copy_seg(&batch, &seg, &copy.segments[i].status);
		if (ret < 0)
			goto out;

		cond_resched();
	}
	if (batch.nr_ops)
		ret = gntdev_copy(&batch);
	return ret;

  out:
	gntdev_put_pages(&batch);
	return ret;
}

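/*
 * A user-space sketch of a single-segment copy from a foreign grant
 * into a local buffer (illustrative only; gref and remote_domid are
 * placeholders negotiated out of band, constants come from the Xen
 * public headers):
 *
 *	struct ioctl_gntdev_grant_copy_segment seg = {
 *		.flags = GNTCOPY_source_gref,
 *		.source.foreign = { .ref = gref, .domid = remote_domid },
 *		.dest.virt = local_buf,
 *		.len = 4096,              // one (4 KiB) Xen page
 *	};
 *	struct ioctl_gntdev_grant_copy copy = {
 *		.count = 1,
 *		.segments = &seg,
 *	};
 *
 *	ioctl(fd, IOCTL_GNTDEV_GRANT_COPY, &copy);
 *	// on success the kernel wrote GNTST_okay back into seg.status
 */
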
static long gntdev_ioctl(struct file *flip,
			 unsigned int cmd, unsigned long arg)
{
	struct gntdev_priv *priv = flip->private_data;
	void __user *ptr = (void __user *)arg;

	switch (cmd) {
	case IOCTL_GNTDEV_MAP_GRANT_REF:
		return gntdev_ioctl_map_grant_ref(priv, ptr);

	case IOCTL_GNTDEV_UNMAP_GRANT_REF:
		return gntdev_ioctl_unmap_grant_ref(priv, ptr);

	case IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR:
		return gntdev_ioctl_get_offset_for_vaddr(priv, ptr);

	case IOCTL_GNTDEV_SET_UNMAP_NOTIFY:
		return gntdev_ioctl_notify(priv, ptr);

	case IOCTL_GNTDEV_GRANT_COPY:
		return gntdev_ioctl_grant_copy(priv, ptr);

#ifdef CONFIG_XEN_GNTDEV_DMABUF
	case IOCTL_GNTDEV_DMABUF_EXP_FROM_REFS:
		return gntdev_ioctl_dmabuf_exp_from_refs(priv, use_ptemod, ptr);

	case IOCTL_GNTDEV_DMABUF_EXP_WAIT_RELEASED:
		return gntdev_ioctl_dmabuf_exp_wait_released(priv, ptr);

	case IOCTL_GNTDEV_DMABUF_IMP_TO_REFS:
		return gntdev_ioctl_dmabuf_imp_to_refs(priv, ptr);

	case IOCTL_GNTDEV_DMABUF_IMP_RELEASE:
		return gntdev_ioctl_dmabuf_imp_release(priv, ptr);
#endif

	default:
		pr_debug("priv %p, unknown cmd %x\n", priv, cmd);
		return -ENOIOCTLCMD;
	}

	return 0;
}

static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
{
	struct gntdev_priv *priv = flip->private_data;
	int index = vma->vm_pgoff;
	int count = vma_pages(vma);
	struct gntdev_grant_map *map;
	int err = -EINVAL;

	if ((vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	pr_debug("map %d+%d at %lx (pgoff %lx)\n",
			index, count, vma->vm_start, vma->vm_pgoff);

	mutex_lock(&priv->lock);
	map = gntdev_find_map_index(priv, index, count);
	if (!map)
		goto unlock_out;
	if (use_ptemod && map->vma)
		goto unlock_out;
	refcount_inc(&map->users);

	vma->vm_ops = &gntdev_vmops;

	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP | VM_MIXEDMAP;

	if (use_ptemod)
		vma->vm_flags |= VM_DONTCOPY;

	vma->vm_private_data = map;
	if (map->flags) {
		if ((vma->vm_flags & VM_WRITE) &&
				(map->flags & GNTMAP_readonly))
			goto out_unlock_put;
	} else {
		map->flags = GNTMAP_host_map;
		if (!(vma->vm_flags & VM_WRITE))
			map->flags |= GNTMAP_readonly;
	}

	if (use_ptemod) {
		map->vma = vma;
		err = mmu_interval_notifier_insert_locked(
			&map->notifier, vma->vm_mm, vma->vm_start,
			vma->vm_end - vma->vm_start, &gntdev_mmu_ops);
		if (err)
			goto out_unlock_put;
	}
	mutex_unlock(&priv->lock);

	if (use_ptemod) {
		/*
		 * gntdev takes the address of the PTE in find_grant_ptes() and
		 * passes it to the hypervisor in gntdev_map_grant_pages(). The
		 * purpose of the notifier is to prevent the hypervisor pointer
		 * to the PTE from going stale.
		 *
		 * Since this vma's mappings can't be touched without the
		 * mmap_lock, and we are holding it now, there is no need for
		 * the notifier_range locking pattern.
		 */
		mmu_interval_read_begin(&map->notifier);

		map->pages_vm_start = vma->vm_start;
		err = apply_to_page_range(vma->vm_mm, vma->vm_start,
					  vma->vm_end - vma->vm_start,
					  find_grant_ptes, map);
		if (err) {
			pr_warn("find_grant_ptes() failure.\n");
			goto out_put_map;
		}
	}

	err = gntdev_map_grant_pages(map);
	if (err)
		goto out_put_map;

	if (!use_ptemod) {
		/*
		 * Use vm_map_pages_zero() rather than vm_map_pages() here:
		 * vma->vm_pgoff encodes which grant map to use (the index
		 * passed to gntdev_find_map_index() above), not an offset
		 * into map->pages, so map->pages[0] must be mapped at
		 * vma->vm_start regardless of the pgoff value.  Otherwise a
		 * single gntdev FD cannot back multiple mappings (see
		 * https://github.com/QubesOS/qubes-issues/issues/5199).
		 */
		err = vm_map_pages_zero(vma, map->pages, map->count);
		if (err)
			goto out_put_map;
	} else {
#ifdef CONFIG_X86
		/*
		 * If the PTEs were not made special by the grant map
		 * hypercall, do so here.
		 *
		 * This is racy since the mapping is already visible
		 * to userspace but userspace should be well-behaved
		 * enough to not touch it until the mmap() call
		 * returns.
		 */
		if (!xen_feature(XENFEAT_gnttab_map_avail_bits)) {
			apply_to_page_range(vma->vm_mm, vma->vm_start,
					    vma->vm_end - vma->vm_start,
					    set_grant_ptes_as_special, NULL);
		}
#endif
	}

	return 0;

unlock_out:
	mutex_unlock(&priv->lock);
	return err;

out_unlock_put:
	mutex_unlock(&priv->lock);
out_put_map:
	if (use_ptemod) {
		unmap_grant_pages(map, 0, map->count);
		if (map->vma) {
			mmu_interval_notifier_remove(&map->notifier);
			map->vma = NULL;
		}
	}
	gntdev_put_map(priv, map);
	return err;
}

static const struct file_operations gntdev_fops = {
	.owner = THIS_MODULE,
	.open = gntdev_open,
	.release = gntdev_release,
	.mmap = gntdev_mmap,
	.unlocked_ioctl = gntdev_ioctl
};

static struct miscdevice gntdev_miscdev = {
	.minor        = MISC_DYNAMIC_MINOR,
	.name         = "xen/gntdev",
	.fops         = &gntdev_fops,
};

/* ------------------------------------------------------------------ */
static int __init gntdev_init(void)
{
	int err;

	if (!xen_domain())
		return -ENODEV;

	use_ptemod = !xen_feature(XENFEAT_auto_translated_physmap);

	err = misc_register(&gntdev_miscdev);
	if (err != 0) {
		pr_err("Could not register gntdev device\n");
		return err;
	}
	return 0;
}

static void __exit gntdev_exit(void)
{
	misc_deregister(&gntdev_miscdev);
}

module_init(gntdev_init);
module_exit(gntdev_exit);

/* ------------------------------------------------------------------ */