/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drm_file.h>
#include <drm/drm_fourcc.h>
# include "virtgpu_drv.h"
void virtio_gpu_gem_free_object(struct drm_gem_object *gem_obj)
{
	struct virtio_gpu_object *obj = gem_to_virtio_gpu_obj(gem_obj);

	if (obj)
		virtio_gpu_object_unref(&obj);
}
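
/*
 * Allocate a virtio_gpu_object and create the matching resource on the
 * host, optionally fenced.  Returns an ERR_PTR on failure.
 */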
struct virtio_gpu_object*
virtio_gpu_alloc_object(struct drm_device *dev,
			struct virtio_gpu_object_params *params,
			struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_device *vgdev = dev->dev_private;
	struct virtio_gpu_object *obj;
	int ret;

	ret = virtio_gpu_object_create(vgdev, params, &obj, fence);
	if (ret)
		return ERR_PTR(ret);

	return obj;
}
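
/*
 * Allocate an object and create a userspace handle for it.  On success
 * the handle holds the only reference: the allocation reference is
 * dropped again before returning.
 */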
int virtio_gpu_gem_create(struct drm_file *file,
			  struct drm_device *dev,
			  struct virtio_gpu_object_params *params,
			  struct drm_gem_object **obj_p,
			  uint32_t *handle_p)
{
	struct virtio_gpu_object *obj;
	int ret;
	u32 handle;

	obj = virtio_gpu_alloc_object(dev, params, NULL);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file, &obj->gem_base, &handle);
	if (ret) {
		drm_gem_object_release(&obj->gem_base);
		return ret;
	}

	*obj_p = &obj->gem_base;

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_put_unlocked(&obj->gem_base);

	*handle_p = handle;
	return 0;
}
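
/*
 * DRM "dumb buffer" creation: only 32 bpp is accepted, the pitch is
 * fixed at width * 4 bytes, and the size is rounded up to a whole page.
 */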
int virtio_gpu_mode_dumb_create(struct drm_file *file_priv,
				struct drm_device *dev,
				struct drm_mode_create_dumb *args)
{
	struct drm_gem_object *gobj;
	struct virtio_gpu_object_params params = { 0 };
	int ret;
	uint32_t pitch;

	if (args->bpp != 32)
		return -EINVAL;

	pitch = args->width * 4;
	args->size = pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);

	params.format = virtio_gpu_translate_format(DRM_FORMAT_HOST_XRGB8888);
	params.width = args->width;
	params.height = args->height;
	params.size = args->size;
	params.dumb = true;
	ret = virtio_gpu_gem_create(file_priv, dev, &params, &gobj,
				    &args->handle);
	if (ret)
		goto fail;

	args->pitch = pitch;
	return ret;

fail:
	return ret;
}
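
/* Report the fake offset userspace has to pass to mmap() for this handle. */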
int virtio_gpu_mode_dumb_mmap(struct drm_file *file_priv,
			      struct drm_device *dev,
			      uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct virtio_gpu_object *obj;

	BUG_ON(!offset_p);
	gobj = drm_gem_object_lookup(file_priv, handle);
	if (gobj == NULL)
		return -ENOENT;
	obj = gem_to_virtio_gpu_obj(gobj);
	*offset_p = virtio_gpu_object_mmap_offset(obj);
	drm_gem_object_put_unlocked(gobj);
	return 0;
}
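
/*
 * Per-open callback, invoked whenever a client gains a handle to the
 * object.  On 3D-capable devices the backing resource is attached to
 * the client's rendering context on the host.
 */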
int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
			       struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = obj->dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct virtio_gpu_object *qobj = gem_to_virtio_gpu_obj(obj);
	int r;

	if (!vgdev->has_virgl_3d)
		return 0;

	r = virtio_gpu_object_reserve(qobj);
	if (r)
		return r;

	virtio_gpu_cmd_context_attach_resource(vgdev, vfpriv->ctx_id,
					       qobj->hw_res_handle);
	virtio_gpu_object_unreserve(qobj);
	return 0;
}
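
/*
 * Counterpart to virtio_gpu_gem_object_open(): detach the resource from
 * the client's host-side context when its handle goes away.
 */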
void virtio_gpu_gem_object_close(struct drm_gem_object *obj,
				 struct drm_file *file)
{
	struct virtio_gpu_device *vgdev = obj->dev->dev_private;
	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
	struct virtio_gpu_object *qobj = gem_to_virtio_gpu_obj(obj);
	int r;

	if (!vgdev->has_virgl_3d)
		return;

	r = virtio_gpu_object_reserve(qobj);
	if (r)
		return;

	virtio_gpu_cmd_context_detach_resource(vgdev, vfpriv->ctx_id,
					       qobj->hw_res_handle);
	virtio_gpu_object_unreserve(qobj);
}
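
/*
 * Object arrays hold references to up to 'total' GEM objects, typically
 * the buffers a virtio command refers to while it is in flight.
 */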
struct virtio_gpu_object_array *virtio_gpu_array_alloc(u32 nents)
{
	struct virtio_gpu_object_array *objs;
	size_t size = sizeof(*objs) + sizeof(objs->objs[0]) * nents;

	objs = kmalloc(size, GFP_KERNEL);
	if (!objs)
		return NULL;

	objs->nents = 0;
	objs->total = nents;
	return objs;
}

static void virtio_gpu_array_free(struct virtio_gpu_object_array *objs)
{
	kfree(objs);
}
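
/*
 * Build an object array from userspace handles.  Every successful
 * lookup takes a reference; if any handle is stale, the references
 * gathered so far are dropped again and NULL is returned.
 */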
struct virtio_gpu_object_array*
virtio_gpu_array_from_handles(struct drm_file *drm_file, u32 *handles, u32 nents)
{
	struct virtio_gpu_object_array *objs;
	u32 i;

	objs = virtio_gpu_array_alloc(nents);
	if (!objs)
		return NULL;

	for (i = 0; i < nents; i++) {
		objs->objs[i] = drm_gem_object_lookup(drm_file, handles[i]);
		if (!objs->objs[i]) {
			objs->nents = i;
			virtio_gpu_array_put_free(objs);
			return NULL;
		}
	}
	objs->nents = i;
	return objs;
}
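
/* Append one object, taking a reference.  The array must have room. */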
void virtio_gpu_array_add_obj(struct virtio_gpu_object_array *objs,
			      struct drm_gem_object *obj)
{
	if (WARN_ON_ONCE(objs->nents == objs->total))
		return;

	drm_gem_object_get(obj);
	objs->objs[objs->nents] = obj;
	objs->nents++;
}
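
/*
 * Lock the reservation objects of all buffers in the array.  The
 * single-buffer case can take the lock directly; multiple buffers need
 * the ww-mutex acquire context for deadlock avoidance.
 */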
int virtio_gpu_array_lock_resv(struct virtio_gpu_object_array *objs)
{
	int ret;

	if (objs->nents == 1) {
		ret = dma_resv_lock_interruptible(objs->objs[0]->resv, NULL);
	} else {
		ret = drm_gem_lock_reservations(objs->objs, objs->nents,
						&objs->ticket);
	}
	return ret;
}
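
/* Undo virtio_gpu_array_lock_resv(). */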
void virtio_gpu_array_unlock_resv(struct virtio_gpu_object_array *objs)
{
	if (objs->nents == 1) {
		dma_resv_unlock(objs->objs[0]->resv);
	} else {
		drm_gem_unlock_reservations(objs->objs, objs->nents,
					    &objs->ticket);
	}
}
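
/*
 * Install 'fence' as the exclusive fence of every buffer in the array
 * so that later users of the buffers wait for the command to complete.
 */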
void virtio_gpu_array_add_fence(struct virtio_gpu_object_array *objs,
				struct dma_fence *fence)
{
	int i;

	for (i = 0; i < objs->nents; i++)
		dma_resv_add_excl_fence(objs->objs[i]->resv, fence);
}
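
/* Drop the references held by the array, then free the array itself. */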
void virtio_gpu_array_put_free(struct virtio_gpu_object_array *objs)
{
	u32 i;

	for (i = 0; i < objs->nents; i++)
		drm_gem_object_put_unlocked(objs->objs[i]);
	virtio_gpu_array_free(objs);
}