/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
# include "virtgpu_drv.h"
# include <drm/drm_plane_helper.h>
# include <drm/drm_atomic_helper.h>
/* DRM fourcc formats the primary plane advertises for scanout. */
static const uint32_t virtio_gpu_formats[] = {
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_BGRX8888,
	DRM_FORMAT_BGRA8888,
	DRM_FORMAT_RGBX8888,
	DRM_FORMAT_RGBA8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ABGR8888,
};
/* The cursor plane accepts a single format. */
static const uint32_t virtio_gpu_cursor_formats[] = {
	DRM_FORMAT_ARGB8888,
};
/*
 * &drm_plane_funcs.destroy callback.
 *
 * The plane was registered with drm_universal_plane_init(), which
 * allocates per-plane state (format list, properties) inside the DRM
 * core; that must be released with drm_plane_cleanup() before the
 * structure itself is freed.  The original code called only kfree()
 * and leaked that state.
 */
static void virtio_gpu_plane_destroy(struct drm_plane *plane)
{
	drm_plane_cleanup(plane);
	kfree(plane);
}
/* Plane vfuncs: defer everything except destroy to the atomic helpers. */
static const struct drm_plane_funcs virtio_gpu_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = virtio_gpu_plane_destroy,
	.reset = drm_atomic_helper_plane_reset,
	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
};
static int virtio_gpu_plane_atomic_check ( struct drm_plane * plane ,
struct drm_plane_state * state )
{
return 0 ;
}
static void virtio_gpu_primary_plane_update ( struct drm_plane * plane ,
struct drm_plane_state * old_state )
2013-09-09 10:02:56 +10:00
{
struct drm_device * dev = plane - > dev ;
struct virtio_gpu_device * vgdev = dev - > dev_private ;
2016-05-27 14:20:24 +02:00
struct virtio_gpu_output * output = NULL ;
2013-09-09 10:02:56 +10:00
struct virtio_gpu_framebuffer * vgfb ;
struct virtio_gpu_object * bo ;
uint32_t handle ;
2016-05-27 14:20:24 +02:00
if ( plane - > state - > crtc )
output = drm_crtc_to_virtio_gpu_output ( plane - > state - > crtc ) ;
if ( old_state - > crtc )
output = drm_crtc_to_virtio_gpu_output ( old_state - > crtc ) ;
2016-08-21 23:06:06 +02:00
if ( WARN_ON ( ! output ) )
return ;
2016-05-27 14:20:24 +02:00
2016-01-13 15:52:07 -06:00
if ( plane - > state - > fb ) {
vgfb = to_virtio_gpu_framebuffer ( plane - > state - > fb ) ;
2013-09-09 10:02:56 +10:00
bo = gem_to_virtio_gpu_obj ( vgfb - > obj ) ;
handle = bo - > hw_res_handle ;
2016-01-13 15:52:09 -06:00
if ( bo - > dumb ) {
virtio_gpu_cmd_transfer_to_host_2d
( vgdev , handle , 0 ,
2016-05-31 14:20:22 +02:00
cpu_to_le32 ( plane - > state - > src_w > > 16 ) ,
cpu_to_le32 ( plane - > state - > src_h > > 16 ) ,
2016-12-05 21:44:39 +02:00
cpu_to_le32 ( plane - > state - > src_x > > 16 ) ,
cpu_to_le32 ( plane - > state - > src_y > > 16 ) , NULL ) ;
2016-01-13 15:52:09 -06:00
}
2013-09-09 10:02:56 +10:00
} else {
handle = 0 ;
}
2016-05-31 14:20:22 +02:00
DRM_DEBUG ( " handle 0x%x, crtc %dx%d+%d+%d, src %dx%d+%d+%d \n " , handle ,
2013-09-09 10:02:56 +10:00
plane - > state - > crtc_w , plane - > state - > crtc_h ,
2016-05-31 14:20:22 +02:00
plane - > state - > crtc_x , plane - > state - > crtc_y ,
plane - > state - > src_w > > 16 ,
plane - > state - > src_h > > 16 ,
plane - > state - > src_x > > 16 ,
plane - > state - > src_y > > 16 ) ;
2013-09-09 10:02:56 +10:00
virtio_gpu_cmd_set_scanout ( vgdev , output - > index , handle ,
2016-05-31 14:20:22 +02:00
plane - > state - > src_w > > 16 ,
plane - > state - > src_h > > 16 ,
plane - > state - > src_x > > 16 ,
plane - > state - > src_y > > 16 ) ;
2016-01-13 15:52:08 -06:00
virtio_gpu_cmd_resource_flush ( vgdev , handle ,
2016-05-31 14:20:22 +02:00
plane - > state - > src_x > > 16 ,
plane - > state - > src_y > > 16 ,
plane - > state - > src_w > > 16 ,
plane - > state - > src_h > > 16 ) ;
2013-09-09 10:02:56 +10:00
}
static void virtio_gpu_cursor_plane_update ( struct drm_plane * plane ,
struct drm_plane_state * old_state )
{
struct drm_device * dev = plane - > dev ;
struct virtio_gpu_device * vgdev = dev - > dev_private ;
struct virtio_gpu_output * output = NULL ;
struct virtio_gpu_framebuffer * vgfb ;
struct virtio_gpu_fence * fence = NULL ;
struct virtio_gpu_object * bo = NULL ;
uint32_t handle ;
int ret = 0 ;
2013-09-09 10:02:56 +10:00
2016-05-26 11:42:52 +02:00
if ( plane - > state - > crtc )
output = drm_crtc_to_virtio_gpu_output ( plane - > state - > crtc ) ;
if ( old_state - > crtc )
output = drm_crtc_to_virtio_gpu_output ( old_state - > crtc ) ;
2016-08-21 23:06:06 +02:00
if ( WARN_ON ( ! output ) )
return ;
2016-05-26 11:42:52 +02:00
if ( plane - > state - > fb ) {
vgfb = to_virtio_gpu_framebuffer ( plane - > state - > fb ) ;
bo = gem_to_virtio_gpu_obj ( vgfb - > obj ) ;
handle = bo - > hw_res_handle ;
} else {
handle = 0 ;
}
if ( bo & & bo - > dumb & & ( plane - > state - > fb ! = old_state - > fb ) ) {
/* new cursor -- update & wait */
virtio_gpu_cmd_transfer_to_host_2d
( vgdev , handle , 0 ,
cpu_to_le32 ( plane - > state - > crtc_w ) ,
cpu_to_le32 ( plane - > state - > crtc_h ) ,
0 , 0 , & fence ) ;
ret = virtio_gpu_object_reserve ( bo , false ) ;
if ( ! ret ) {
reservation_object_add_excl_fence ( bo - > tbo . resv ,
& fence - > f ) ;
2016-10-25 13:00:45 +01:00
dma_fence_put ( & fence - > f ) ;
2016-05-26 11:42:52 +02:00
fence = NULL ;
virtio_gpu_object_unreserve ( bo ) ;
virtio_gpu_object_wait ( bo , false ) ;
}
}
if ( plane - > state - > fb ! = old_state - > fb ) {
2016-05-31 09:36:21 +02:00
DRM_DEBUG ( " update, handle %d, pos +%d+%d, hot %d,%d \n " , handle ,
2016-05-26 11:42:52 +02:00
plane - > state - > crtc_x ,
2016-05-31 09:36:21 +02:00
plane - > state - > crtc_y ,
plane - > state - > fb ? plane - > state - > fb - > hot_x : 0 ,
plane - > state - > fb ? plane - > state - > fb - > hot_y : 0 ) ;
2016-05-26 11:42:52 +02:00
output - > cursor . hdr . type =
cpu_to_le32 ( VIRTIO_GPU_CMD_UPDATE_CURSOR ) ;
output - > cursor . resource_id = cpu_to_le32 ( handle ) ;
2016-05-31 09:36:21 +02:00
if ( plane - > state - > fb ) {
output - > cursor . hot_x =
cpu_to_le32 ( plane - > state - > fb - > hot_x ) ;
output - > cursor . hot_y =
cpu_to_le32 ( plane - > state - > fb - > hot_y ) ;
} else {
output - > cursor . hot_x = cpu_to_le32 ( 0 ) ;
output - > cursor . hot_y = cpu_to_le32 ( 0 ) ;
}
2016-05-26 11:42:52 +02:00
} else {
DRM_DEBUG ( " move +%d+%d \n " ,
plane - > state - > crtc_x ,
plane - > state - > crtc_y ) ;
output - > cursor . hdr . type =
cpu_to_le32 ( VIRTIO_GPU_CMD_MOVE_CURSOR ) ;
}
output - > cursor . pos . x = cpu_to_le32 ( plane - > state - > crtc_x ) ;
output - > cursor . pos . y = cpu_to_le32 ( plane - > state - > crtc_y ) ;
virtio_gpu_cursor_ping ( vgdev , output ) ;
}
/* Helper vfuncs for the primary (scanout) plane. */
static const struct drm_plane_helper_funcs virtio_gpu_primary_helper_funcs = {
	.atomic_check = virtio_gpu_plane_atomic_check,
	.atomic_update = virtio_gpu_primary_plane_update,
};
/* Helper vfuncs for the cursor plane. */
static const struct drm_plane_helper_funcs virtio_gpu_cursor_helper_funcs = {
	.atomic_check = virtio_gpu_plane_atomic_check,
	.atomic_update = virtio_gpu_cursor_plane_update,
};
struct drm_plane * virtio_gpu_plane_init ( struct virtio_gpu_device * vgdev ,
2016-05-26 11:42:52 +02:00
enum drm_plane_type type ,
2013-09-09 10:02:56 +10:00
int index )
{
struct drm_device * dev = vgdev - > ddev ;
2016-05-26 11:42:52 +02:00
const struct drm_plane_helper_funcs * funcs ;
2013-09-09 10:02:56 +10:00
struct drm_plane * plane ;
2016-05-26 11:42:52 +02:00
const uint32_t * formats ;
int ret , nformats ;
2013-09-09 10:02:56 +10:00
plane = kzalloc ( sizeof ( * plane ) , GFP_KERNEL ) ;
if ( ! plane )
return ERR_PTR ( - ENOMEM ) ;
2016-05-26 11:42:52 +02:00
if ( type = = DRM_PLANE_TYPE_CURSOR ) {
formats = virtio_gpu_cursor_formats ;
nformats = ARRAY_SIZE ( virtio_gpu_cursor_formats ) ;
funcs = & virtio_gpu_cursor_helper_funcs ;
} else {
formats = virtio_gpu_formats ;
nformats = ARRAY_SIZE ( virtio_gpu_formats ) ;
funcs = & virtio_gpu_primary_helper_funcs ;
}
2013-09-09 10:02:56 +10:00
ret = drm_universal_plane_init ( dev , plane , 1 < < index ,
& virtio_gpu_plane_funcs ,
2016-05-26 11:42:52 +02:00
formats , nformats ,
type , NULL ) ;
2013-09-09 10:02:56 +10:00
if ( ret )
goto err_plane_init ;
2016-05-26 11:42:52 +02:00
drm_plane_helper_add ( plane , funcs ) ;
2013-09-09 10:02:56 +10:00
return plane ;
err_plane_init :
kfree ( plane ) ;
return ERR_PTR ( ret ) ;
}