2015-03-04 16:33:41 -08:00
/*
 * Copyright 2011 Red Hat, Inc.
 * Copyright © 2014 The Chromium OS Authors
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT, OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Adam Jackson <ajax@redhat.com>
 *    Ben Widawsky <ben@bwidawsk.net>
 */

/**
 * This is vgem, a (non-hardware-backed) GEM service.  This is used by Mesa's
 * software renderer and the X server for efficient buffer sharing.
 */
# include <linux/module.h>
# include <linux/ramfs.h>
# include <linux/shmem_fs.h>
# include <linux/dma-buf.h>
# include "vgem_drv.h"
# define DRIVER_NAME "vgem"
# define DRIVER_DESC "Virtual GEM provider"
# define DRIVER_DATE "20120112"
# define DRIVER_MAJOR 1
# define DRIVER_MINOR 0
2017-05-08 14:22:28 +01:00
/*
 * Singleton driver state: the DRM device embedded alongside the platform
 * device that provides a struct device for dma-buf import/attachment.
 */
static struct vgem_device {
	struct drm_device drm;
	struct platform_device *platform;
} *vgem_device;
2017-05-04 11:45:46 -07:00
2015-03-04 16:33:41 -08:00
static void vgem_gem_free_object ( struct drm_gem_object * obj )
{
struct drm_vgem_gem_object * vgem_obj = to_vgem_bo ( obj ) ;
2017-05-17 14:23:12 +02:00
kvfree ( vgem_obj - > pages ) ;
2017-06-22 14:46:17 +01:00
mutex_destroy ( & vgem_obj - > pages_lock ) ;
2017-05-04 11:45:48 -07:00
if ( obj - > import_attach )
drm_prime_gem_destroy ( obj , vgem_obj - > table ) ;
2015-03-04 16:33:41 -08:00
drm_gem_object_release ( obj ) ;
kfree ( vgem_obj ) ;
}
2018-04-16 20:32:32 +05:30
static vm_fault_t vgem_gem_fault ( struct vm_fault * vmf )
2015-03-04 16:33:41 -08:00
{
2017-02-24 14:56:41 -08:00
struct vm_area_struct * vma = vmf - > vma ;
2015-03-04 16:33:41 -08:00
struct drm_vgem_gem_object * obj = vma - > vm_private_data ;
/* We don't use vmf->pgoff since that has the fake offset */
2016-12-14 15:07:01 -08:00
unsigned long vaddr = vmf - > address ;
2018-04-16 20:32:32 +05:30
vm_fault_t ret = VM_FAULT_SIGBUS ;
2017-05-04 11:45:48 -07:00
loff_t num_pages ;
pgoff_t page_offset ;
page_offset = ( vaddr - vma - > vm_start ) > > PAGE_SHIFT ;
num_pages = DIV_ROUND_UP ( obj - > base . size , PAGE_SIZE ) ;
2018-07-03 15:29:21 +03:00
if ( page_offset > = num_pages )
2017-05-04 11:45:48 -07:00
return VM_FAULT_SIGBUS ;
2017-06-22 14:46:17 +01:00
mutex_lock ( & obj - > pages_lock ) ;
2017-05-04 11:45:48 -07:00
if ( obj - > pages ) {
get_page ( obj - > pages [ page_offset ] ) ;
vmf - > page = obj - > pages [ page_offset ] ;
ret = 0 ;
2017-06-22 14:46:17 +01:00
}
mutex_unlock ( & obj - > pages_lock ) ;
if ( ret ) {
2017-05-04 11:45:48 -07:00
struct page * page ;
page = shmem_read_mapping_page (
file_inode ( obj - > base . filp ) - > i_mapping ,
page_offset ) ;
if ( ! IS_ERR ( page ) ) {
vmf - > page = page ;
ret = 0 ;
} else switch ( PTR_ERR ( page ) ) {
case - ENOSPC :
case - ENOMEM :
ret = VM_FAULT_OOM ;
break ;
case - EBUSY :
ret = VM_FAULT_RETRY ;
break ;
case - EFAULT :
case - EINVAL :
ret = VM_FAULT_SIGBUS ;
break ;
default :
WARN_ON ( PTR_ERR ( page ) ) ;
ret = VM_FAULT_SIGBUS ;
break ;
}
2015-03-04 16:33:41 -08:00
}
2017-05-04 11:45:48 -07:00
return ret ;
2015-03-04 16:33:41 -08:00
}
2015-09-09 15:39:26 -07:00
static const struct vm_operations_struct vgem_gem_vm_ops = {
2015-03-04 16:33:41 -08:00
. fault = vgem_gem_fault ,
. open = drm_gem_vm_open ,
. close = drm_gem_vm_close ,
} ;
2016-07-15 09:31:11 +01:00
static int vgem_open ( struct drm_device * dev , struct drm_file * file )
{
struct vgem_file * vfile ;
int ret ;
vfile = kzalloc ( sizeof ( * vfile ) , GFP_KERNEL ) ;
if ( ! vfile )
return - ENOMEM ;
file - > driver_priv = vfile ;
ret = vgem_fence_open ( vfile ) ;
if ( ret ) {
kfree ( vfile ) ;
return ret ;
}
return 0 ;
}
2017-03-08 15:12:50 +01:00
static void vgem_postclose ( struct drm_device * dev , struct drm_file * file )
2016-07-15 09:31:11 +01:00
{
struct vgem_file * vfile = file - > driver_priv ;
vgem_fence_close ( vfile ) ;
kfree ( vfile ) ;
}
2017-05-04 11:45:48 -07:00
static struct drm_vgem_gem_object * __vgem_gem_create ( struct drm_device * dev ,
unsigned long size )
2015-03-04 16:33:41 -08:00
{
struct drm_vgem_gem_object * obj ;
2016-06-23 15:35:32 +01:00
int ret ;
2015-03-04 16:33:41 -08:00
obj = kzalloc ( sizeof ( * obj ) , GFP_KERNEL ) ;
if ( ! obj )
return ERR_PTR ( - ENOMEM ) ;
2016-06-23 15:35:32 +01:00
ret = drm_gem_object_init ( dev , & obj - > base , roundup ( size , PAGE_SIZE ) ) ;
2017-05-04 11:45:48 -07:00
if ( ret ) {
kfree ( obj ) ;
return ERR_PTR ( ret ) ;
}
2017-06-22 14:46:17 +01:00
mutex_init ( & obj - > pages_lock ) ;
2017-05-04 11:45:48 -07:00
return obj ;
}
static void __vgem_gem_destroy ( struct drm_vgem_gem_object * obj )
{
drm_gem_object_release ( & obj - > base ) ;
kfree ( obj ) ;
}
static struct drm_gem_object * vgem_gem_create ( struct drm_device * dev ,
struct drm_file * file ,
unsigned int * handle ,
unsigned long size )
{
struct drm_vgem_gem_object * obj ;
int ret ;
obj = __vgem_gem_create ( dev , size ) ;
if ( IS_ERR ( obj ) )
return ERR_CAST ( obj ) ;
2015-03-04 16:33:41 -08:00
2016-06-23 15:35:32 +01:00
ret = drm_gem_handle_create ( file , & obj - > base , handle ) ;
2017-08-11 15:33:11 +03:00
drm_gem_object_put_unlocked ( & obj - > base ) ;
2016-06-23 15:35:32 +01:00
if ( ret )
goto err ;
2015-03-04 16:33:41 -08:00
2016-06-23 15:35:32 +01:00
return & obj - > base ;
2015-03-04 16:33:41 -08:00
2016-06-23 15:35:32 +01:00
err :
2017-05-04 11:45:48 -07:00
__vgem_gem_destroy ( obj ) ;
2016-06-23 15:35:32 +01:00
return ERR_PTR ( ret ) ;
2015-03-04 16:33:41 -08:00
}
static int vgem_gem_dumb_create ( struct drm_file * file , struct drm_device * dev ,
struct drm_mode_create_dumb * args )
{
struct drm_gem_object * gem_object ;
2016-06-23 15:35:32 +01:00
u64 pitch , size ;
2015-03-04 16:33:41 -08:00
2016-06-23 15:35:32 +01:00
pitch = args - > width * DIV_ROUND_UP ( args - > bpp , 8 ) ;
2015-03-04 16:33:41 -08:00
size = args - > height * pitch ;
if ( size = = 0 )
return - EINVAL ;
gem_object = vgem_gem_create ( dev , file , & args - > handle , size ) ;
2016-06-23 15:35:32 +01:00
if ( IS_ERR ( gem_object ) )
2015-03-04 16:33:41 -08:00
return PTR_ERR ( gem_object ) ;
args - > size = gem_object - > size ;
args - > pitch = pitch ;
DRM_DEBUG_DRIVER ( " Created object of size %lld \n " , size ) ;
return 0 ;
}
2016-06-23 15:35:32 +01:00
/*
 * Report the fake mmap offset for a dumb buffer so userspace can mmap it
 * through the DRM device node.
 */
static int vgem_gem_dumb_map(struct drm_file *file, struct drm_device *dev,
			     uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup(file, handle);
	if (!obj)
		return -ENOENT;

	/* Imported dma-bufs have no shmem file and cannot be mapped here. */
	if (!obj->filp) {
		ret = -EINVAL;
		goto unref;
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto unref;

	*offset = drm_vma_node_offset_addr(&obj->vma_node);
unref:
	drm_gem_object_put_unlocked(obj);

	return ret;
}
static struct drm_ioctl_desc vgem_ioctls [ ] = {
2016-07-15 09:31:11 +01:00
DRM_IOCTL_DEF_DRV ( VGEM_FENCE_ATTACH , vgem_fence_attach_ioctl , DRM_AUTH | DRM_RENDER_ALLOW ) ,
DRM_IOCTL_DEF_DRV ( VGEM_FENCE_SIGNAL , vgem_fence_signal_ioctl , DRM_AUTH | DRM_RENDER_ALLOW ) ,
2015-03-04 16:33:41 -08:00
} ;
2016-06-23 15:35:32 +01:00
static int vgem_mmap ( struct file * filp , struct vm_area_struct * vma )
{
unsigned long flags = vma - > vm_flags ;
int ret ;
ret = drm_gem_mmap ( filp , vma ) ;
if ( ret )
return ret ;
/* Keep the WC mmaping set by drm_gem_mmap() but our pages
* are ordinary and not special .
*/
vma - > vm_flags = flags | VM_DONTEXPAND | VM_DONTDUMP ;
return 0 ;
}
2015-03-04 16:33:41 -08:00
static const struct file_operations vgem_driver_fops = {
. owner = THIS_MODULE ,
. open = drm_open ,
2016-06-23 15:35:32 +01:00
. mmap = vgem_mmap ,
2015-03-04 16:33:41 -08:00
. poll = drm_poll ,
. read = drm_read ,
. unlocked_ioctl = drm_ioctl ,
2017-07-14 20:12:12 -07:00
. compat_ioctl = drm_compat_ioctl ,
2015-03-04 16:33:41 -08:00
. release = drm_release ,
} ;
2017-06-22 14:46:17 +01:00
static struct page * * vgem_pin_pages ( struct drm_vgem_gem_object * bo )
{
mutex_lock ( & bo - > pages_lock ) ;
if ( bo - > pages_pin_count + + = = 0 ) {
struct page * * pages ;
pages = drm_gem_get_pages ( & bo - > base ) ;
if ( IS_ERR ( pages ) ) {
bo - > pages_pin_count - - ;
mutex_unlock ( & bo - > pages_lock ) ;
return pages ;
}
bo - > pages = pages ;
}
mutex_unlock ( & bo - > pages_lock ) ;
return bo - > pages ;
}
static void vgem_unpin_pages ( struct drm_vgem_gem_object * bo )
{
mutex_lock ( & bo - > pages_lock ) ;
if ( - - bo - > pages_pin_count = = 0 ) {
drm_gem_put_pages ( & bo - > base , bo - > pages , true , true ) ;
bo - > pages = NULL ;
}
mutex_unlock ( & bo - > pages_lock ) ;
}
2016-07-11 14:08:07 +01:00
static int vgem_prime_pin ( struct drm_gem_object * obj )
{
2017-06-22 14:46:17 +01:00
struct drm_vgem_gem_object * bo = to_vgem_bo ( obj ) ;
2016-07-11 14:08:07 +01:00
long n_pages = obj - > size > > PAGE_SHIFT ;
struct page * * pages ;
2017-06-22 14:46:17 +01:00
pages = vgem_pin_pages ( bo ) ;
2016-07-11 14:08:07 +01:00
if ( IS_ERR ( pages ) )
return PTR_ERR ( pages ) ;
2017-06-22 14:46:17 +01:00
/* Flush the object from the CPU cache so that importers can rely
* on coherent indirect access via the exported dma - address .
*/
2016-07-11 14:08:07 +01:00
drm_clflush_pages ( pages , n_pages ) ;
return 0 ;
}
2017-06-22 14:46:17 +01:00
/* PRIME unpin callback: balance vgem_prime_pin(). */
static void vgem_prime_unpin(struct drm_gem_object *obj)
{
	struct drm_vgem_gem_object *bo = to_vgem_bo(obj);

	vgem_unpin_pages(bo);
}
2016-07-11 14:08:07 +01:00
2017-06-22 14:46:17 +01:00
static struct sg_table * vgem_prime_get_sg_table ( struct drm_gem_object * obj )
{
struct drm_vgem_gem_object * bo = to_vgem_bo ( obj ) ;
2016-07-11 14:08:07 +01:00
2017-06-22 14:46:17 +01:00
return drm_prime_pages_to_sg ( bo - > pages , bo - > base . size > > PAGE_SHIFT ) ;
2016-07-11 14:08:07 +01:00
}
2017-05-04 11:45:48 -07:00
static struct drm_gem_object * vgem_prime_import ( struct drm_device * dev ,
struct dma_buf * dma_buf )
{
2017-05-08 14:22:28 +01:00
struct vgem_device * vgem = container_of ( dev , typeof ( * vgem ) , drm ) ;
return drm_gem_prime_import_dev ( dev , dma_buf , & vgem - > platform - > dev ) ;
2017-05-04 11:45:48 -07:00
}
static struct drm_gem_object * vgem_prime_import_sg_table ( struct drm_device * dev ,
struct dma_buf_attachment * attach , struct sg_table * sg )
{
struct drm_vgem_gem_object * obj ;
int npages ;
obj = __vgem_gem_create ( dev , attach - > dmabuf - > size ) ;
if ( IS_ERR ( obj ) )
return ERR_CAST ( obj ) ;
npages = PAGE_ALIGN ( attach - > dmabuf - > size ) / PAGE_SIZE ;
obj - > table = sg ;
2017-05-17 14:23:12 +02:00
obj - > pages = kvmalloc_array ( npages , sizeof ( struct page * ) , GFP_KERNEL ) ;
2017-05-04 11:45:48 -07:00
if ( ! obj - > pages ) {
__vgem_gem_destroy ( obj ) ;
return ERR_PTR ( - ENOMEM ) ;
}
2017-06-22 14:46:17 +01:00
obj - > pages_pin_count + + ; /* perma-pinned */
2017-05-04 11:45:48 -07:00
drm_prime_sg_to_page_addr_arrays ( obj - > table , obj - > pages , NULL ,
npages ) ;
return & obj - > base ;
}
2016-07-11 14:08:07 +01:00
static void * vgem_prime_vmap ( struct drm_gem_object * obj )
{
2017-06-22 14:46:17 +01:00
struct drm_vgem_gem_object * bo = to_vgem_bo ( obj ) ;
2016-07-11 14:08:07 +01:00
long n_pages = obj - > size > > PAGE_SHIFT ;
struct page * * pages ;
2017-06-22 14:46:17 +01:00
pages = vgem_pin_pages ( bo ) ;
2016-07-11 14:08:07 +01:00
if ( IS_ERR ( pages ) )
return NULL ;
2017-06-22 14:46:17 +01:00
return vmap ( pages , n_pages , 0 , pgprot_writecombine ( PAGE_KERNEL ) ) ;
2016-07-11 14:08:07 +01:00
}
/* Tear down a vgem_prime_vmap() mapping and drop the pin it took. */
static void vgem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	struct drm_vgem_gem_object *bo = to_vgem_bo(obj);

	vunmap(vaddr);
	vgem_unpin_pages(bo);
}
static int vgem_prime_mmap ( struct drm_gem_object * obj ,
struct vm_area_struct * vma )
{
int ret ;
if ( obj - > size < vma - > vm_end - vma - > vm_start )
return - EINVAL ;
if ( ! obj - > filp )
return - ENODEV ;
2017-02-20 16:51:23 +01:00
ret = call_mmap ( obj - > filp , vma ) ;
2016-07-11 14:08:07 +01:00
if ( ret )
return ret ;
fput ( vma - > vm_file ) ;
vma - > vm_file = get_file ( obj - > filp ) ;
vma - > vm_flags | = VM_DONTEXPAND | VM_DONTDUMP ;
vma - > vm_page_prot = pgprot_writecombine ( vm_get_page_prot ( vma - > vm_flags ) ) ;
return 0 ;
}
2017-05-08 14:22:28 +01:00
/* Last-reference release of the DRM device: free everything vgem_init()
 * allocated. */
static void vgem_release(struct drm_device *dev)
{
	struct vgem_device *vgem = container_of(dev, typeof(*vgem), drm);

	platform_device_unregister(vgem->platform);
	drm_dev_fini(&vgem->drm);

	kfree(vgem);
}
2015-03-04 16:33:41 -08:00
static struct drm_driver vgem_driver = {
2016-07-11 14:08:07 +01:00
. driver_features = DRIVER_GEM | DRIVER_PRIME ,
2017-05-08 14:22:28 +01:00
. release = vgem_release ,
2016-07-15 09:31:11 +01:00
. open = vgem_open ,
2017-03-08 15:12:50 +01:00
. postclose = vgem_postclose ,
2016-05-30 19:53:08 +02:00
. gem_free_object_unlocked = vgem_gem_free_object ,
2015-03-04 16:33:41 -08:00
. gem_vm_ops = & vgem_gem_vm_ops ,
. ioctls = vgem_ioctls ,
2016-07-15 09:31:11 +01:00
. num_ioctls = ARRAY_SIZE ( vgem_ioctls ) ,
2015-03-04 16:33:41 -08:00
. fops = & vgem_driver_fops ,
2016-07-11 14:08:07 +01:00
2015-03-04 16:33:41 -08:00
. dumb_create = vgem_gem_dumb_create ,
. dumb_map_offset = vgem_gem_dumb_map ,
2016-07-11 14:08:07 +01:00
. prime_handle_to_fd = drm_gem_prime_handle_to_fd ,
2017-05-04 11:45:48 -07:00
. prime_fd_to_handle = drm_gem_prime_fd_to_handle ,
2016-07-11 14:08:07 +01:00
. gem_prime_pin = vgem_prime_pin ,
2017-06-22 14:46:17 +01:00
. gem_prime_unpin = vgem_prime_unpin ,
2017-05-04 11:45:48 -07:00
. gem_prime_import = vgem_prime_import ,
2016-07-11 14:08:07 +01:00
. gem_prime_export = drm_gem_prime_export ,
2017-05-04 11:45:48 -07:00
. gem_prime_import_sg_table = vgem_prime_import_sg_table ,
2016-07-11 14:08:07 +01:00
. gem_prime_get_sg_table = vgem_prime_get_sg_table ,
. gem_prime_vmap = vgem_prime_vmap ,
. gem_prime_vunmap = vgem_prime_vunmap ,
. gem_prime_mmap = vgem_prime_mmap ,
2015-03-04 16:33:41 -08:00
. name = DRIVER_NAME ,
. desc = DRIVER_DESC ,
. date = DRIVER_DATE ,
. major = DRIVER_MAJOR ,
. minor = DRIVER_MINOR ,
} ;
/*
 * Module init: allocate the singleton vgem device, register its backing
 * platform device (used as the DMA device for dma-buf), set a 64-bit DMA
 * mask, and expose the DRM device to userspace.
 */
static int __init vgem_init(void)
{
	int ret;

	vgem_device = kzalloc(sizeof(*vgem_device), GFP_KERNEL);
	if (!vgem_device)
		return -ENOMEM;

	ret = drm_dev_init(&vgem_device->drm, &vgem_driver, NULL);
	if (ret)
		goto out_free;

	vgem_device->platform =
		platform_device_register_simple("vgem", -1, NULL, 0);
	if (IS_ERR(vgem_device->platform)) {
		ret = PTR_ERR(vgem_device->platform);
		goto out_fini;
	}

	/*
	 * Don't ignore a DMA-mask failure: importers would otherwise see a
	 * device with the tiny default mask and fail (or corrupt) mappings.
	 */
	ret = dma_coerce_mask_and_coherent(&vgem_device->platform->dev,
					   DMA_BIT_MASK(64));
	if (ret)
		goto out_unregister;

	/* Final step: expose the device/driver to userspace */
	ret = drm_dev_register(&vgem_device->drm, 0);
	if (ret)
		goto out_unregister;

	return 0;

out_unregister:
	platform_device_unregister(vgem_device->platform);
out_fini:
	drm_dev_fini(&vgem_device->drm);
out_free:
	kfree(vgem_device);
	return ret;
}
/* Module exit: unregister and drop the final reference; vgem_release()
 * does the actual freeing. */
static void __exit vgem_exit(void)
{
	drm_dev_unregister(&vgem_device->drm);
	drm_dev_unref(&vgem_device->drm);
}
module_init(vgem_init);
module_exit(vgem_exit);

MODULE_AUTHOR("Red Hat, Inc.");
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION(DRIVER_DESC);
/*
 * The license string must exactly match one of the kernel's recognised
 * license idents; the previous space-padded string would have tainted the
 * kernel and blocked GPL-only symbol use.
 */
MODULE_LICENSE("GPL and additional rights");