2010-12-14 21:40:46 +03:00
/******************************************************************************
* gntdev . c
*
* Device for accessing ( in user - space ) pages that have been granted by other
* domains .
*
* Copyright ( c ) 2006 - 2007 , D G Murray .
* ( c ) 2009 Gerd Hoffmann < kraxel @ redhat . com >
*
* This program is distributed in the hope that it will be useful ,
* but WITHOUT ANY WARRANTY ; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE . See the
* GNU General Public License for more details .
*
* You should have received a copy of the GNU General Public License
* along with this program ; if not , write to the Free Software
* Foundation , Inc . , 59 Temple Place , Suite 330 , Boston , MA 02111 - 1307 USA
*/
# undef DEBUG
# include <linux/module.h>
# include <linux/kernel.h>
# include <linux/init.h>
# include <linux/miscdevice.h>
# include <linux/fs.h>
# include <linux/mm.h>
# include <linux/mman.h>
# include <linux/mmu_notifier.h>
# include <linux/types.h>
# include <linux/uaccess.h>
# include <linux/sched.h>
# include <linux/spinlock.h>
# include <linux/slab.h>
2011-02-03 20:19:02 +03:00
# include <linux/highmem.h>
2010-12-14 21:40:46 +03:00
# include <xen/xen.h>
# include <xen/grant_table.h>
2011-03-10 02:07:34 +03:00
# include <xen/balloon.h>
2010-12-14 21:40:46 +03:00
# include <xen/gntdev.h>
2011-02-03 20:19:04 +03:00
# include <xen/events.h>
2010-12-14 21:40:46 +03:00
# include <asm/xen/hypervisor.h>
# include <asm/xen/hypercall.h>
# include <asm/xen/page.h>
/* Module metadata. */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Derek G. Murray <Derek.Murray@cl.cam.ac.uk>, "
	      "Gerd Hoffmann <kraxel@redhat.com>");
MODULE_DESCRIPTION("User-space granted page access driver");
2011-02-03 20:18:59 +03:00
/* Global cap on the number of grant pages that may be mapped at once
 * across all users of the device (tunable via module parameter). */
static int limit = 1024 * 1024;
module_param(limit, int, 0644);
MODULE_PARM_DESC(limit, "Maximum number of grants that may be mapped by "
		"the gntdev device");

/* Number of grant pages currently mapped device-wide; checked against
 * 'limit' in gntdev_ioctl_map_grant_ref(). */
static atomic_t pages_mapped = ATOMIC_INIT(0);

/* Non-zero on PV domains, where user mappings are established by having
 * the hypervisor rewrite ptes (GNTMAP_contains_pte); zero on HVM /
 * auto-translated guests.  Set once in gntdev_init(). */
static int use_ptemod;
2010-12-14 21:40:46 +03:00
/* Per-open-file state for /dev/xen/gntdev. */
struct gntdev_priv {
	/* List of grant_map areas created through this file handle. */
	struct list_head maps;
	/* lock protects maps from concurrent changes */
	spinlock_t lock;
	/* Address space the mappings belong to (use_ptemod only). */
	struct mm_struct *mm;
	/* Notifier that tears mappings down when parts of the mm are
	 * invalidated or the mm is released (use_ptemod only). */
	struct mmu_notifier mn;
};
2011-02-03 20:19:04 +03:00
struct unmap_notify {
int flags ;
/* Address relative to the start of the grant_map */
int addr ;
int event ;
} ;
2010-12-14 21:40:46 +03:00
/* One contiguous range of granted pages mapped through the device. */
struct grant_map {
	struct list_head next;		/* entry in gntdev_priv->maps */
	struct vm_area_struct *vma;	/* bound user vma (use_ptemod only) */
	int index;			/* offset into the device file, in pages */
	int count;			/* number of grant pages */
	int flags;			/* GNTMAP_* flags for the mapping */
	atomic_t users;			/* reference count */
	struct unmap_notify notify;	/* requested unmap notification */
	struct ioctl_gntdev_grant_ref *grants;	/* refs supplied by userspace */
	struct gnttab_map_grant_ref *map_ops;	/* one map op per page */
	struct gnttab_unmap_grant_ref *unmap_ops; /* one unmap op per page */
	struct gnttab_map_grant_ref *kmap_ops;	/* kernel-pte map ops (use_ptemod) */
	struct page **pages;		/* ballooned backing pages */
};
/* Forward declaration: gntdev_put_map() unmaps on the final put. */
static int unmap_grant_pages(struct grant_map *map, int offset, int pages);
2010-12-14 21:40:46 +03:00
/* ------------------------------------------------------------------ */
/* Debug helper: dump the maps list of @priv.  The entry whose index
 * equals @text_index is tagged with @text.  Compiled out unless DEBUG. */
static void gntdev_print_maps(struct gntdev_priv *priv,
			      char *text, int text_index)
{
#ifdef DEBUG
	struct grant_map *entry;

	pr_debug("%s: maps list (priv %p)\n", __func__, priv);
	list_for_each_entry(entry, &priv->maps, next) {
		pr_debug("  index %2d, count %2d %s\n",
			 entry->index, entry->count,
			 entry->index == text_index && text ? text : "");
	}
#endif
}
2012-10-24 15:39:02 +04:00
static void gntdev_free_map ( struct grant_map * map )
{
if ( map = = NULL )
return ;
if ( map - > pages )
free_xenballooned_pages ( map - > count , map - > pages ) ;
kfree ( map - > pages ) ;
kfree ( map - > grants ) ;
kfree ( map - > map_ops ) ;
kfree ( map - > unmap_ops ) ;
kfree ( map - > kmap_ops ) ;
kfree ( map ) ;
}
2010-12-14 21:40:46 +03:00
static struct grant_map * gntdev_alloc_map ( struct gntdev_priv * priv , int count )
{
struct grant_map * add ;
2010-12-10 17:56:42 +03:00
int i ;
2010-12-14 21:40:46 +03:00
add = kzalloc ( sizeof ( struct grant_map ) , GFP_KERNEL ) ;
if ( NULL = = add )
return NULL ;
2011-11-04 22:23:32 +04:00
add - > grants = kcalloc ( count , sizeof ( add - > grants [ 0 ] ) , GFP_KERNEL ) ;
add - > map_ops = kcalloc ( count , sizeof ( add - > map_ops [ 0 ] ) , GFP_KERNEL ) ;
add - > unmap_ops = kcalloc ( count , sizeof ( add - > unmap_ops [ 0 ] ) , GFP_KERNEL ) ;
add - > kmap_ops = kcalloc ( count , sizeof ( add - > kmap_ops [ 0 ] ) , GFP_KERNEL ) ;
add - > pages = kcalloc ( count , sizeof ( add - > pages [ 0 ] ) , GFP_KERNEL ) ;
2010-12-10 17:56:42 +03:00
if ( NULL = = add - > grants | |
NULL = = add - > map_ops | |
NULL = = add - > unmap_ops | |
2011-09-29 14:57:56 +04:00
NULL = = add - > kmap_ops | |
2010-12-10 17:56:42 +03:00
NULL = = add - > pages )
2010-12-14 21:40:46 +03:00
goto err ;
2011-09-29 14:57:55 +04:00
if ( alloc_xenballooned_pages ( count , add - > pages , false /* lowmem */ ) )
2011-03-10 02:07:34 +03:00
goto err ;
2010-12-10 17:56:42 +03:00
for ( i = 0 ; i < count ; i + + ) {
2011-02-23 16:11:35 +03:00
add - > map_ops [ i ] . handle = - 1 ;
add - > unmap_ops [ i ] . handle = - 1 ;
2011-09-29 14:57:56 +04:00
add - > kmap_ops [ i ] . handle = - 1 ;
2010-12-10 17:56:42 +03:00
}
2010-12-14 21:40:46 +03:00
add - > index = 0 ;
add - > count = count ;
2011-02-03 20:19:01 +03:00
atomic_set ( & add - > users , 1 ) ;
2010-12-14 21:40:46 +03:00
return add ;
err :
2012-10-24 15:39:02 +04:00
gntdev_free_map ( add ) ;
2010-12-14 21:40:46 +03:00
return NULL ;
}
static void gntdev_add_map ( struct gntdev_priv * priv , struct grant_map * add )
{
struct grant_map * map ;
list_for_each_entry ( map , & priv - > maps , next ) {
if ( add - > index + add - > count < map - > index ) {
list_add_tail ( & add - > next , & map - > next ) ;
goto done ;
}
add - > index = map - > index + map - > count ;
}
list_add_tail ( & add - > next , & priv - > maps ) ;
done :
gntdev_print_maps ( priv , " [new] " , add - > index ) ;
}
static struct grant_map * gntdev_find_map_index ( struct gntdev_priv * priv ,
int index , int count )
{
struct grant_map * map ;
list_for_each_entry ( map , & priv - > maps , next ) {
if ( map - > index ! = index )
continue ;
2011-02-03 20:19:04 +03:00
if ( count & & map - > count ! = count )
2010-12-14 21:40:46 +03:00
continue ;
return map ;
}
return NULL ;
}
2011-02-03 20:19:01 +03:00
/* Drop one reference to @map; on the final put send the requested unmap
 * notification, unmap any still-mapped grants (non-ptemod case) and free
 * the map.  NULL-safe. */
static void gntdev_put_map(struct grant_map *map)
{
	if (!map)
		return;

	if (!atomic_dec_and_test(&map->users))
		return;

	/* Last reference is gone: give the pages back to the global limit. */
	atomic_sub(map->count, &pages_mapped);

	if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT) {
		notify_remote_via_evtchn(map->notify.event);
		/* Drop the evtchn reference taken in gntdev_ioctl_notify(). */
		evtchn_put(map->notify.event);
	}

	/* With use_ptemod the grants were already unmapped when the vma
	 * went away (mmu notifier / vma close); only the non-ptemod case
	 * still holds live mappings here. */
	if (map->pages && !use_ptemod)
		unmap_grant_pages(map, 0, map->count);

	gntdev_free_map(map);
}
/* ------------------------------------------------------------------ */
/* apply_to_page_range() callback (use_ptemod path): record the machine
 * address of each pte of the vma in the per-page map/unmap ops, so the
 * hypervisor can establish/remove the grant mapping by rewriting that
 * pte (GNTMAP_contains_pte). */
static int find_grant_ptes(pte_t *pte, pgtable_t token,
			   unsigned long addr, void *data)
{
	struct grant_map *map = data;
	/* Page number within the map, derived from the vma offset. */
	unsigned int pgnr = (addr - map->vma->vm_start) >> PAGE_SHIFT;
	int flags = map->flags | GNTMAP_application_map | GNTMAP_contains_pte;
	u64 pte_maddr;

	BUG_ON(pgnr >= map->count);
	pte_maddr = arbitrary_virt_to_machine(pte).maddr;

	gnttab_set_map_op(&map->map_ops[pgnr], pte_maddr, flags,
			  map->grants[pgnr].ref,
			  map->grants[pgnr].domid);
	gnttab_set_unmap_op(&map->unmap_ops[pgnr], pte_maddr, flags,
			    -1 /* handle */);
	return 0;
}
/* Perform the hypercalls that actually map the grants described by @map.
 * On success the returned handles are copied into unmap_ops so the
 * mapping can later be undone.  Returns 0, a hypercall error, or -EINVAL
 * if any individual grant was rejected. */
static int map_grant_pages(struct grant_map *map)
{
	int i, err = 0;

	if (!use_ptemod) {
		/* Note: it could already be mapped */
		if (map->map_ops[0].handle != -1)
			return 0;
		/* HVM/auto-translated: map straight at the kernel addresses
		 * of the ballooned backing pages. */
		for (i = 0; i < map->count; i++) {
			unsigned long addr = (unsigned long)
				pfn_to_kaddr(page_to_pfn(map->pages[i]));
			gnttab_set_map_op(&map->map_ops[i], addr, map->flags,
					  map->grants[i].ref,
					  map->grants[i].domid);
			gnttab_set_unmap_op(&map->unmap_ops[i], addr,
					    map->flags, -1 /* handle */);
		}
	} else {
		/*
		 * Setup the map_ops corresponding to the pte entries pointing
		 * to the kernel linear addresses of the struct pages.
		 * These ptes are completely different from the user ptes dealt
		 * with find_grant_ptes.
		 */
		for (i = 0; i < map->count; i++) {
			unsigned level;
			unsigned long address = (unsigned long)
				pfn_to_kaddr(page_to_pfn(map->pages[i]));
			pte_t *ptep;
			u64 pte_maddr = 0;
			/* lookup_address() only walks the linear mapping. */
			BUG_ON(PageHighMem(map->pages[i]));

			ptep = lookup_address(address, &level);
			pte_maddr = arbitrary_virt_to_machine(ptep).maddr;
			gnttab_set_map_op(&map->kmap_ops[i], pte_maddr,
					  map->flags |
					  GNTMAP_host_map |
					  GNTMAP_contains_pte,
					  map->grants[i].ref,
					  map->grants[i].domid);
		}
	}

	pr_debug("map %d+%d\n", map->index, map->count);
	err = gnttab_map_refs(map->map_ops, use_ptemod ? map->kmap_ops : NULL,
			      map->pages, map->count);
	if (err)
		return err;

	for (i = 0; i < map->count; i++) {
		/* Per-grant status: keep going so every successful handle
		 * is still recorded for later unmapping. */
		if (map->map_ops[i].status)
			err = -EINVAL;
		else {
			BUG_ON(map->map_ops[i].handle == -1);
			map->unmap_ops[i].handle = map->map_ops[i].handle;
			pr_debug("map handle=%d\n", map->map_ops[i].handle);
		}
	}
	return err;
}
2011-02-09 23:12:00 +03:00
/* Unmap @pages grants starting at @offset, and perform a pending
 * clear-byte notification if its target page lies inside this range.
 * Callers guarantee every entry in the range holds a valid handle. */
static int __unmap_grant_pages(struct grant_map *map, int offset, int pages)
{
	int i, err = 0;

	if (map->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) {
		int pgno = (map->notify.addr >> PAGE_SHIFT);

		if (pgno >= offset && pgno < offset + pages && use_ptemod) {
			/* ptemod: page is still mapped in userspace, clear
			 * the byte through the user mapping.  err is 0 here,
			 * so this writes a single zero byte. */
			void __user *tmp = (void __user *)
				map->vma->vm_start + map->notify.addr;
			err = copy_to_user(tmp, &err, 1);
			if (err)
				return -EFAULT;
			map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE;
		} else if (pgno >= offset && pgno < offset + pages) {
			/* Clear the byte through the kernel mapping. */
			uint8_t *tmp = kmap(map->pages[pgno]);
			tmp[map->notify.addr & (PAGE_SIZE-1)] = 0;
			kunmap(map->pages[pgno]);
			map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE;
		}
	}

	err = gnttab_unmap_refs(map->unmap_ops + offset,
			use_ptemod ? map->kmap_ops + offset : NULL,
			map->pages + offset, pages);
	if (err)
		return err;

	for (i = 0; i < pages; i++) {
		if (map->unmap_ops[offset+i].status)
			err = -EINVAL;
		pr_debug("unmap handle=%d st=%d\n",
			map->unmap_ops[offset+i].handle,
			map->unmap_ops[offset+i].status);
		/* Mark the slot "not mapped" for unmap_grant_pages(). */
		map->unmap_ops[offset+i].handle = -1;
	}
	return err;
}
2011-02-09 23:12:00 +03:00
static int unmap_grant_pages ( struct grant_map * map , int offset , int pages )
{
int range , err = 0 ;
pr_debug ( " unmap %d+%d [%d+%d] \n " , map - > index , map - > count , offset , pages ) ;
/* It is possible the requested range will have a "hole" where we
* already unmapped some of the grants . Only unmap valid ranges .
*/
while ( pages & & ! err ) {
2011-02-23 16:11:35 +03:00
while ( pages & & map - > unmap_ops [ offset ] . handle = = - 1 ) {
2011-02-09 23:12:00 +03:00
offset + + ;
pages - - ;
}
range = 0 ;
while ( range < pages ) {
2011-02-23 16:11:35 +03:00
if ( map - > unmap_ops [ offset + range ] . handle = = - 1 ) {
2011-02-09 23:12:00 +03:00
range - - ;
break ;
}
range + + ;
}
err = __unmap_grant_pages ( map , offset , range ) ;
offset + = range ;
pages - = range ;
}
return err ;
}
2010-12-14 21:40:46 +03:00
/* ------------------------------------------------------------------ */
2011-03-07 23:18:57 +03:00
static void gntdev_vma_open ( struct vm_area_struct * vma )
{
struct grant_map * map = vma - > vm_private_data ;
pr_debug ( " gntdev_vma_open %p \n " , vma ) ;
atomic_inc ( & map - > users ) ;
}
2010-12-14 21:40:46 +03:00
static void gntdev_vma_close ( struct vm_area_struct * vma )
{
struct grant_map * map = vma - > vm_private_data ;
2011-03-07 23:18:57 +03:00
pr_debug ( " gntdev_vma_close %p \n " , vma ) ;
2010-12-14 21:40:46 +03:00
map - > vma = NULL ;
vma - > vm_private_data = NULL ;
2011-02-03 20:19:01 +03:00
gntdev_put_map ( map ) ;
2010-12-14 21:40:46 +03:00
}
/* VM operations for gntdev vmas: keep the backing map's refcount in
 * step with the vma's lifetime. */
static struct vm_operations_struct gntdev_vmops = {
	.open = gntdev_vma_open,
	.close = gntdev_vma_close,
};
/* ------------------------------------------------------------------ */
/* mmu notifier: [start, end) of the address space is being invalidated
 * (e.g. munmap).  Unmap the overlapping portion of every grant mapping
 * so the hypervisor releases the grants before the ptes go away. */
static void mn_invl_range_start(struct mmu_notifier *mn,
				struct mm_struct *mm,
				unsigned long start, unsigned long end)
{
	struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn);
	struct grant_map *map;
	unsigned long mstart, mend;
	int err;

	spin_lock(&priv->lock);
	list_for_each_entry(map, &priv->maps, next) {
		if (!map->vma)
			continue;
		/* Skip maps whose vma does not intersect [start, end). */
		if (map->vma->vm_start >= end)
			continue;
		if (map->vma->vm_end <= start)
			continue;
		/* Clamp the invalidated range to this vma. */
		mstart = max(start, map->vma->vm_start);
		mend   = min(end,   map->vma->vm_end);
		pr_debug("map %d+%d (%lx %lx), range %lx %lx, mrange %lx %lx\n",
				map->index, map->count,
				map->vma->vm_start, map->vma->vm_end,
				start, end, mstart, mend);
		err = unmap_grant_pages(map,
					(mstart - map->vma->vm_start) >> PAGE_SHIFT,
					(mend - mstart) >> PAGE_SHIFT);
		WARN_ON(err);
	}
	spin_unlock(&priv->lock);
}
/* mmu notifier: a single page is being invalidated -- delegate to the
 * range handler for [address, address + PAGE_SIZE). */
static void mn_invl_page(struct mmu_notifier *mn,
			 struct mm_struct *mm,
			 unsigned long address)
{
	mn_invl_range_start(mn, mm, address, address + PAGE_SIZE);
}
/* mmu notifier: the whole address space is going away (process exit).
 * Unmap every grant mapping that still has a vma attached. */
static void mn_release(struct mmu_notifier *mn,
		       struct mm_struct *mm)
{
	struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn);
	struct grant_map *map;
	int err;

	spin_lock(&priv->lock);
	list_for_each_entry(map, &priv->maps, next) {
		if (!map->vma)
			continue;
		pr_debug("map %d+%d (%lx %lx)\n",
				map->index, map->count,
				map->vma->vm_start, map->vma->vm_end);
		err = unmap_grant_pages(map, /* offset */ 0, map->count);
		WARN_ON(err);
	}
	spin_unlock(&priv->lock);
}
2012-08-21 22:49:34 +04:00
/* mmu notifier callbacks; registered per open file when use_ptemod. */
static struct mmu_notifier_ops gntdev_mmu_ops = {
	.release = mn_release,
	.invalidate_page = mn_invl_page,
	.invalidate_range_start = mn_invl_range_start,
};
/* ------------------------------------------------------------------ */
static int gntdev_open ( struct inode * inode , struct file * flip )
{
struct gntdev_priv * priv ;
int ret = 0 ;
priv = kzalloc ( sizeof ( * priv ) , GFP_KERNEL ) ;
if ( ! priv )
return - ENOMEM ;
INIT_LIST_HEAD ( & priv - > maps ) ;
spin_lock_init ( & priv - > lock ) ;
2011-02-03 20:19:02 +03:00
if ( use_ptemod ) {
priv - > mm = get_task_mm ( current ) ;
if ( ! priv - > mm ) {
kfree ( priv ) ;
return - ENOMEM ;
}
priv - > mn . ops = & gntdev_mmu_ops ;
ret = mmu_notifier_register ( & priv - > mn , priv - > mm ) ;
mmput ( priv - > mm ) ;
2010-12-14 21:40:46 +03:00
}
if ( ret ) {
kfree ( priv ) ;
return ret ;
}
flip - > private_data = priv ;
pr_debug ( " priv %p \n " , priv ) ;
return 0 ;
}
static int gntdev_release ( struct inode * inode , struct file * flip )
{
struct gntdev_priv * priv = flip - > private_data ;
struct grant_map * map ;
pr_debug ( " priv %p \n " , priv ) ;
while ( ! list_empty ( & priv - > maps ) ) {
map = list_entry ( priv - > maps . next , struct grant_map , next ) ;
2011-02-03 20:19:01 +03:00
list_del ( & map - > next ) ;
gntdev_put_map ( map ) ;
2010-12-14 21:40:46 +03:00
}
2011-02-03 20:19:02 +03:00
if ( use_ptemod )
mmu_notifier_unregister ( & priv - > mn , priv - > mm ) ;
2010-12-14 21:40:46 +03:00
kfree ( priv ) ;
return 0 ;
}
/* IOCTL_GNTDEV_MAP_GRANT_REF: register op.count grant refs with this
 * file handle and report back the file offset (op.index) at which they
 * can subsequently be mmap()ed.
 *
 * Fix: a failed copy_from_user() of the grant-ref array previously
 * returned the stale -ENOMEM in 'err'; a bad user pointer is an
 * addressing error and must report -EFAULT. */
static long gntdev_ioctl_map_grant_ref(struct gntdev_priv *priv,
				       struct ioctl_gntdev_map_grant_ref __user *u)
{
	struct ioctl_gntdev_map_grant_ref op;
	struct grant_map *map;
	int err;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;
	pr_debug("priv %p, add %d\n", priv, op.count);
	if (unlikely(op.count <= 0))
		return -EINVAL;

	err = -ENOMEM;
	map = gntdev_alloc_map(priv, op.count);
	if (!map)
		return err;

	/* Account the pages against the global limit before committing. */
	if (unlikely(atomic_add_return(op.count, &pages_mapped) > limit)) {
		pr_debug("can't map: over limit\n");
		gntdev_put_map(map);
		return err;
	}

	if (copy_from_user(map->grants, &u->refs,
			   sizeof(map->grants[0]) * op.count) != 0) {
		gntdev_put_map(map);
		return -EFAULT;
	}

	spin_lock(&priv->lock);
	gntdev_add_map(priv, map);
	op.index = map->index << PAGE_SHIFT;
	spin_unlock(&priv->lock);

	if (copy_to_user(u, &op, sizeof(op)) != 0)
		return -EFAULT;

	return 0;
}
/* IOCTL_GNTDEV_UNMAP_GRANT_REF: forget the map registered at op.index
 * with exactly op.count pages.  Returns -ENOENT if no such map. */
static long gntdev_ioctl_unmap_grant_ref(struct gntdev_priv *priv,
					 struct ioctl_gntdev_unmap_grant_ref __user *u)
{
	struct ioctl_gntdev_unmap_grant_ref op;
	struct grant_map *map;
	int err = -ENOENT;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;
	pr_debug("priv %p, del %d+%d\n", priv, (int)op.index, (int)op.count);

	spin_lock(&priv->lock);
	map = gntdev_find_map_index(priv, op.index >> PAGE_SHIFT, op.count);
	if (map) {
		/* Unlink under the lock; the teardown itself happens after
		 * the lock is dropped. */
		list_del(&map->next);
		err = 0;
	}
	spin_unlock(&priv->lock);

	if (map)
		gntdev_put_map(map);

	return err;
}
/* IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR: given a user virtual address inside
 * a gntdev mapping, report the file offset and page count of the map.
 *
 * Fix: find_vma() was called without mmap_sem held, so the returned vma
 * (and its vm_private_data) could be freed by a concurrent munmap()
 * while being dereferenced.  Hold mmap_sem for read across the walk. */
static long gntdev_ioctl_get_offset_for_vaddr(struct gntdev_priv *priv,
					      struct ioctl_gntdev_get_offset_for_vaddr __user *u)
{
	struct ioctl_gntdev_get_offset_for_vaddr op;
	struct vm_area_struct *vma;
	struct grant_map *map;
	int rv = -EINVAL;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;
	pr_debug("priv %p, offset for vaddr %lx\n", priv, (unsigned long)op.vaddr);

	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, op.vaddr);
	if (!vma || vma->vm_ops != &gntdev_vmops)
		goto out_unlock;

	map = vma->vm_private_data;
	if (!map)
		goto out_unlock;

	op.offset = map->index << PAGE_SHIFT;
	op.count = map->count;
	rv = 0;

out_unlock:
	up_read(&current->mm->mmap_sem);

	if (rv == 0 && copy_to_user(u, &op, sizeof(op)) != 0)
		return -EFAULT;
	return rv;
}
2011-02-03 20:19:04 +03:00
/* IOCTL_GNTDEV_SET_UNMAP_NOTIFY: arrange for a byte clear and/or an
 * event-channel notification when the map containing op.index is
 * unmapped.  Replaces any previously configured notification. */
static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u)
{
	struct ioctl_gntdev_unmap_notify op;
	struct grant_map *map;
	int rc;
	/* Notification state whose evtchn reference must be dropped on
	 * exit: initially the new request (in case of error), after a
	 * successful swap the old, replaced one. */
	int out_flags;
	unsigned int out_event;

	if (copy_from_user(&op, u, sizeof(op)))
		return -EFAULT;

	if (op.action & ~(UNMAP_NOTIFY_CLEAR_BYTE|UNMAP_NOTIFY_SEND_EVENT))
		return -EINVAL;

	/* We need to grab a reference to the event channel we are going to use
	 * to send the notify before releasing the reference we may already have
	 * (if someone has called this ioctl twice). This is required so that
	 * it is possible to change the clear_byte part of the notification
	 * without disturbing the event channel part, which may now be the last
	 * reference to that event channel.
	 */
	if (op.action & UNMAP_NOTIFY_SEND_EVENT) {
		if (evtchn_get(op.event_channel_port))
			return -EINVAL;
	}

	out_flags = op.action;
	out_event = op.event_channel_port;

	spin_lock(&priv->lock);

	/* Find the map whose page range contains op.index. */
	list_for_each_entry(map, &priv->maps, next) {
		uint64_t begin = map->index << PAGE_SHIFT;
		uint64_t end = (map->index + map->count) << PAGE_SHIFT;
		if (op.index >= begin && op.index < end)
			goto found;
	}
	rc = -ENOENT;
	goto unlock_out;

found:
	/* A clear-byte notification needs a writable mapping. */
	if ((op.action & UNMAP_NOTIFY_CLEAR_BYTE) &&
			(map->flags & GNTMAP_readonly)) {
		rc = -EINVAL;
		goto unlock_out;
	}

	/* Swap in the new notification; remember the old one so its
	 * evtchn reference can be released below. */
	out_flags = map->notify.flags;
	out_event = map->notify.event;

	map->notify.flags = op.action;
	map->notify.addr = op.index - (map->index << PAGE_SHIFT);
	map->notify.event = op.event_channel_port;

	rc = 0;

unlock_out:
	spin_unlock(&priv->lock);

	/* Drop the reference to the event channel we did not save in the map */
	if (out_flags & UNMAP_NOTIFY_SEND_EVENT)
		evtchn_put(out_event);

	return rc;
}
2010-12-14 21:40:46 +03:00
static long gntdev_ioctl ( struct file * flip ,
unsigned int cmd , unsigned long arg )
{
struct gntdev_priv * priv = flip - > private_data ;
void __user * ptr = ( void __user * ) arg ;
switch ( cmd ) {
case IOCTL_GNTDEV_MAP_GRANT_REF :
return gntdev_ioctl_map_grant_ref ( priv , ptr ) ;
case IOCTL_GNTDEV_UNMAP_GRANT_REF :
return gntdev_ioctl_unmap_grant_ref ( priv , ptr ) ;
case IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR :
return gntdev_ioctl_get_offset_for_vaddr ( priv , ptr ) ;
2011-02-03 20:19:04 +03:00
case IOCTL_GNTDEV_SET_UNMAP_NOTIFY :
return gntdev_ioctl_notify ( priv , ptr ) ;
2010-12-14 21:40:46 +03:00
default :
pr_debug ( " priv %p, unknown cmd %x \n " , priv , cmd ) ;
return - ENOIOCTLCMD ;
}
return 0 ;
}
/* mmap() handler: attach the grant_map registered at vm_pgoff to @vma
 * and establish the grant mappings -- by pte rewriting on PV
 * (use_ptemod) or by vm_insert_page() of the ballooned pages on HVM. */
static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
{
	struct gntdev_priv *priv = flip->private_data;
	int index = vma->vm_pgoff;
	int count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
	struct grant_map *map;
	int i, err = -EINVAL;

	/* Writable mappings must be shared: grants cannot be COWed. */
	if ((vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	pr_debug("map %d+%d at %lx (pgoff %lx)\n",
			index, count, vma->vm_start, vma->vm_pgoff);

	spin_lock(&priv->lock);
	map = gntdev_find_map_index(priv, index, count);
	if (!map)
		goto unlock_out;
	/* Only one vma may be bound to a ptemod map at a time. */
	if (use_ptemod && map->vma)
		goto unlock_out;
	if (use_ptemod && priv->mm != vma->vm_mm) {
		printk(KERN_WARNING "Huh? Other mm?\n");
		goto unlock_out;
	}

	/* Reference held by the vma; dropped in gntdev_vma_close(). */
	atomic_inc(&map->users);

	vma->vm_ops = &gntdev_vmops;

	/* Pin the vma down: no expanding, no core dumps. */
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;

	/* The rewritten ptes belong to this mm; a fork()ed child must
	 * not inherit them. */
	if (use_ptemod)
		vma->vm_flags |= VM_DONTCOPY;

	vma->vm_private_data = map;

	if (use_ptemod)
		map->vma = vma;

	if (map->flags) {
		/* Flags were fixed by an earlier mmap/notify: a writable
		 * vma cannot reuse a read-only mapping. */
		if ((vma->vm_flags & VM_WRITE) &&
				(map->flags & GNTMAP_readonly))
			goto out_unlock_put;
	} else {
		map->flags = GNTMAP_host_map;
		if (!(vma->vm_flags & VM_WRITE))
			map->flags |= GNTMAP_readonly;
	}

	spin_unlock(&priv->lock);

	if (use_ptemod) {
		/* Collect the machine addresses of the vma's ptes into
		 * map->map_ops before issuing the map hypercall. */
		err = apply_to_page_range(vma->vm_mm, vma->vm_start,
					  vma->vm_end - vma->vm_start,
					  find_grant_ptes, map);
		if (err) {
			printk(KERN_WARNING "find_grant_ptes() failure.\n");
			goto out_put_map;
		}
	}

	err = map_grant_pages(map);
	if (err)
		goto out_put_map;

	if (!use_ptemod) {
		for (i = 0; i < count; i++) {
			err = vm_insert_page(vma, vma->vm_start + i*PAGE_SIZE,
				map->pages[i]);
			if (err)
				goto out_put_map;
		}
	}

	return 0;

unlock_out:
	spin_unlock(&priv->lock);
	return err;

out_unlock_put:
	spin_unlock(&priv->lock);
out_put_map:
	/* NOTE(review): on a partial failure any grants already mapped
	 * are only torn down when the last reference is put -- confirm
	 * this matches the grant-table lifetime rules for use_ptemod. */
	if (use_ptemod)
		map->vma = NULL;
	gntdev_put_map(map);
	return err;
}
/* File operations for /dev/xen/gntdev. */
static const struct file_operations gntdev_fops = {
	.owner = THIS_MODULE,
	.open = gntdev_open,
	.release = gntdev_release,
	.mmap = gntdev_mmap,
	.unlocked_ioctl = gntdev_ioctl
};
/* Misc character device registered as /dev/xen/gntdev. */
static struct miscdevice gntdev_miscdev = {
	.minor        = MISC_DYNAMIC_MINOR,
	.name         = "xen/gntdev",
	.fops         = &gntdev_fops,
};
/* ------------------------------------------------------------------ */
/* Module init: only meaningful inside a Xen domain.  PV guests use the
 * pte-rewriting mapping path (use_ptemod); HVM guests do not. */
static int __init gntdev_init(void)
{
	int err;

	if (!xen_domain())
		return -ENODEV;

	use_ptemod = xen_pv_domain();

	err = misc_register(&gntdev_miscdev);
	if (err != 0) {
		printk(KERN_ERR "Could not register gntdev device\n");
		return err;
	}

	return 0;
}
/* Module exit: remove the misc device node. */
static void __exit gntdev_exit(void)
{
	misc_deregister(&gntdev_miscdev);
}
module_init ( gntdev_init ) ;
module_exit ( gntdev_exit ) ;
/* ------------------------------------------------------------------ */