2015-04-20 16:55:21 -04:00
/*
 * Copyright © 2007 David Airlie
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *     David Airlie
 */
# include <linux/module.h>
# include <linux/slab.h>
2016-08-27 12:37:22 -04:00
# include <linux/pm_runtime.h>
2015-04-20 16:55:21 -04:00
# include <drm/drmP.h>
# include <drm/drm_crtc.h>
# include <drm/drm_crtc_helper.h>
# include <drm/amdgpu_drm.h>
# include "amdgpu.h"
2015-05-14 23:48:26 +02:00
# include "cikd.h"
2015-04-20 16:55:21 -04:00
# include <drm/drm_fb_helper.h>
# include <linux/vga_switcheroo.h>
/*
 * Object hierarchy:
 * amdgpu_fbdev bundles the generic DRM fbdev helper with the amdgpu
 * framebuffer used for the console; the helper keeps a pointer back to
 * the framebuffer base class.
 */
struct amdgpu_fbdev {
	/* Must stay the first member: amdgpufb_create() casts the
	 * drm_fb_helper pointer back to struct amdgpu_fbdev. */
	struct drm_fb_helper helper;
	struct amdgpu_framebuffer rfb;	/* fbcon framebuffer */
	struct amdgpu_device *adev;	/* owning device */
};
2016-08-27 12:37:22 -04:00
static int
amdgpufb_open ( struct fb_info * info , int user )
{
struct amdgpu_fbdev * rfbdev = info - > par ;
struct amdgpu_device * adev = rfbdev - > adev ;
int ret = pm_runtime_get_sync ( adev - > ddev - > dev ) ;
if ( ret < 0 & & ret ! = - EACCES ) {
pm_runtime_mark_last_busy ( adev - > ddev - > dev ) ;
pm_runtime_put_autosuspend ( adev - > ddev - > dev ) ;
return ret ;
}
return 0 ;
}
static int
amdgpufb_release ( struct fb_info * info , int user )
{
struct amdgpu_fbdev * rfbdev = info - > par ;
struct amdgpu_device * adev = rfbdev - > adev ;
pm_runtime_mark_last_busy ( adev - > ddev - > dev ) ;
pm_runtime_put_autosuspend ( adev - > ddev - > dev ) ;
return 0 ;
}
2015-04-20 16:55:21 -04:00
static struct fb_ops amdgpufb_ops = {
. owner = THIS_MODULE ,
2016-11-14 00:03:13 +01:00
DRM_FB_HELPER_DEFAULT_OPS ,
2016-08-27 12:37:22 -04:00
. fb_open = amdgpufb_open ,
. fb_release = amdgpufb_release ,
2015-07-31 16:22:00 +05:30
. fb_fillrect = drm_fb_helper_cfb_fillrect ,
. fb_copyarea = drm_fb_helper_cfb_copyarea ,
. fb_imageblit = drm_fb_helper_cfb_imageblit ,
2015-04-20 16:55:21 -04:00
} ;
/*
 * Align a scanline to the CRTC pitch requirements.
 *
 * @adev:  device (currently unused; kept for API symmetry)
 * @width: width in pixels
 * @cpp:   bytes per pixel (1, 2, 3 or 4; other values get no alignment)
 * @tiled: tiling requested (currently unused)
 *
 * Returns the aligned pitch in bytes.
 */
int amdgpu_align_pitch(struct amdgpu_device *adev, int width, int cpp, bool tiled)
{
	int mask;

	if (cpp == 1)
		mask = 255;
	else if (cpp == 2)
		mask = 127;
	else if (cpp == 3 || cpp == 4)
		mask = 63;
	else
		mask = 0;

	return ((width + mask) & ~mask) * cpp;
}
static void amdgpufb_destroy_pinned_object ( struct drm_gem_object * gobj )
{
2016-09-15 15:06:50 +02:00
struct amdgpu_bo * abo = gem_to_amdgpu_bo ( gobj ) ;
2015-04-20 16:55:21 -04:00
int ret ;
2017-04-28 17:28:14 +09:00
ret = amdgpu_bo_reserve ( abo , true ) ;
2015-04-20 16:55:21 -04:00
if ( likely ( ret = = 0 ) ) {
2016-09-15 15:06:50 +02:00
amdgpu_bo_kunmap ( abo ) ;
amdgpu_bo_unpin ( abo ) ;
amdgpu_bo_unreserve ( abo ) ;
2015-04-20 16:55:21 -04:00
}
drm_gem_object_unreference_unlocked ( gobj ) ;
}
static int amdgpufb_create_pinned_object ( struct amdgpu_fbdev * rfbdev ,
struct drm_mode_fb_cmd2 * mode_cmd ,
struct drm_gem_object * * gobj_p )
{
struct amdgpu_device * adev = rfbdev - > adev ;
struct drm_gem_object * gobj = NULL ;
2016-09-15 15:06:50 +02:00
struct amdgpu_bo * abo = NULL ;
2015-04-20 16:55:21 -04:00
bool fb_tiled = false ; /* useful for testing */
u32 tiling_flags = 0 ;
int ret ;
int aligned_size , size ;
int height = mode_cmd - > height ;
2016-10-18 01:41:17 +03:00
u32 cpp ;
2015-04-20 16:55:21 -04:00
2016-10-18 01:41:17 +03:00
cpp = drm_format_plane_cpp ( mode_cmd - > pixel_format , 0 ) ;
2015-04-20 16:55:21 -04:00
/* need to align pitch with crtc limits */
2016-10-18 01:41:17 +03:00
mode_cmd - > pitches [ 0 ] = amdgpu_align_pitch ( adev , mode_cmd - > width , cpp ,
fb_tiled ) ;
2015-04-20 16:55:21 -04:00
height = ALIGN ( mode_cmd - > height , 8 ) ;
size = mode_cmd - > pitches [ 0 ] * height ;
aligned_size = ALIGN ( size , PAGE_SIZE ) ;
ret = amdgpu_gem_object_create ( adev , aligned_size , 0 ,
AMDGPU_GEM_DOMAIN_VRAM ,
2016-08-15 17:00:22 +02:00
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
2017-01-24 11:39:48 +08:00
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
AMDGPU_GEM_CREATE_VRAM_CLEARED ,
2015-08-27 00:14:16 -04:00
true , & gobj ) ;
2015-04-20 16:55:21 -04:00
if ( ret ) {
2017-02-28 04:55:52 -08:00
pr_err ( " failed to allocate framebuffer (%d) \n " , aligned_size ) ;
2015-04-20 16:55:21 -04:00
return - ENOMEM ;
}
2016-09-15 15:06:50 +02:00
abo = gem_to_amdgpu_bo ( gobj ) ;
2015-04-20 16:55:21 -04:00
if ( fb_tiled )
2015-05-14 23:48:26 +02:00
tiling_flags = AMDGPU_TILING_SET ( ARRAY_MODE , GRPH_ARRAY_2D_TILED_THIN1 ) ;
2015-04-20 16:55:21 -04:00
2016-09-15 15:06:50 +02:00
ret = amdgpu_bo_reserve ( abo , false ) ;
2015-04-20 16:55:21 -04:00
if ( unlikely ( ret ! = 0 ) )
goto out_unref ;
if ( tiling_flags ) {
2016-09-15 15:06:50 +02:00
ret = amdgpu_bo_set_tiling_flags ( abo ,
2015-05-14 23:03:57 +02:00
tiling_flags ) ;
2015-04-20 16:55:21 -04:00
if ( ret )
dev_err ( adev - > dev , " FB failed to set tiling flags \n " ) ;
}
2016-12-07 16:14:38 -05:00
ret = amdgpu_bo_pin ( abo , AMDGPU_GEM_DOMAIN_VRAM , NULL ) ;
2015-04-20 16:55:21 -04:00
if ( ret ) {
2016-09-15 15:06:50 +02:00
amdgpu_bo_unreserve ( abo ) ;
2015-04-20 16:55:21 -04:00
goto out_unref ;
}
2016-09-15 15:06:50 +02:00
ret = amdgpu_bo_kmap ( abo , NULL ) ;
amdgpu_bo_unreserve ( abo ) ;
2015-04-20 16:55:21 -04:00
if ( ret ) {
goto out_unref ;
}
* gobj_p = gobj ;
return 0 ;
out_unref :
amdgpufb_destroy_pinned_object ( gobj ) ;
* gobj_p = NULL ;
return ret ;
}
static int amdgpufb_create ( struct drm_fb_helper * helper ,
struct drm_fb_helper_surface_size * sizes )
{
struct amdgpu_fbdev * rfbdev = ( struct amdgpu_fbdev * ) helper ;
struct amdgpu_device * adev = rfbdev - > adev ;
struct fb_info * info ;
struct drm_framebuffer * fb = NULL ;
struct drm_mode_fb_cmd2 mode_cmd ;
struct drm_gem_object * gobj = NULL ;
2016-09-15 15:06:50 +02:00
struct amdgpu_bo * abo = NULL ;
2015-04-20 16:55:21 -04:00
int ret ;
unsigned long tmp ;
mode_cmd . width = sizes - > surface_width ;
mode_cmd . height = sizes - > surface_height ;
if ( sizes - > surface_bpp = = 24 )
sizes - > surface_bpp = 32 ;
mode_cmd . pixel_format = drm_mode_legacy_fb_format ( sizes - > surface_bpp ,
sizes - > surface_depth ) ;
ret = amdgpufb_create_pinned_object ( rfbdev , & mode_cmd , & gobj ) ;
if ( ret ) {
DRM_ERROR ( " failed to create fbcon object %d \n " , ret ) ;
return ret ;
}
2016-09-15 15:06:50 +02:00
abo = gem_to_amdgpu_bo ( gobj ) ;
2015-04-20 16:55:21 -04:00
/* okay we have an object now allocate the framebuffer */
2015-07-31 16:22:00 +05:30
info = drm_fb_helper_alloc_fbi ( helper ) ;
if ( IS_ERR ( info ) ) {
ret = PTR_ERR ( info ) ;
2017-02-07 17:16:03 +01:00
goto out ;
2015-04-20 16:55:21 -04:00
}
info - > par = rfbdev ;
2015-11-02 10:52:32 -05:00
info - > skip_vt_switch = true ;
2015-04-20 16:55:21 -04:00
ret = amdgpu_framebuffer_init ( adev - > ddev , & rfbdev - > rfb , & mode_cmd , gobj ) ;
if ( ret ) {
DRM_ERROR ( " failed to initialize framebuffer %d \n " , ret ) ;
2017-02-07 17:16:03 +01:00
goto out ;
2015-04-20 16:55:21 -04:00
}
fb = & rfbdev - > rfb . base ;
/* setup helper */
rfbdev - > helper . fb = fb ;
strcpy ( info - > fix . id , " amdgpudrmfb " ) ;
2016-12-14 23:31:35 +02:00
drm_fb_helper_fill_fix ( info , fb - > pitches [ 0 ] , fb - > format - > depth ) ;
2015-04-20 16:55:21 -04:00
info - > fbops = & amdgpufb_ops ;
2016-09-15 15:06:50 +02:00
tmp = amdgpu_bo_gpu_offset ( abo ) - adev - > mc . vram_start ;
2015-04-20 16:55:21 -04:00
info - > fix . smem_start = adev - > mc . aper_base + tmp ;
2016-09-15 15:06:50 +02:00
info - > fix . smem_len = amdgpu_bo_size ( abo ) ;
info - > screen_base = abo - > kptr ;
info - > screen_size = amdgpu_bo_size ( abo ) ;
2015-04-20 16:55:21 -04:00
drm_fb_helper_fill_var ( info , & rfbdev - > helper , sizes - > fb_width , sizes - > fb_height ) ;
/* setup aperture base/size for vesafb takeover */
info - > apertures - > ranges [ 0 ] . base = adev - > ddev - > mode_config . fb_base ;
info - > apertures - > ranges [ 0 ] . size = adev - > mc . aper_size ;
/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
if ( info - > screen_base = = NULL ) {
ret = - ENOSPC ;
2017-02-07 17:16:03 +01:00
goto out ;
2015-04-20 16:55:21 -04:00
}
DRM_INFO ( " fb mappable at 0x%lX \n " , info - > fix . smem_start ) ;
DRM_INFO ( " vram apper at 0x%lX \n " , ( unsigned long ) adev - > mc . aper_base ) ;
2016-09-15 15:06:50 +02:00
DRM_INFO ( " size %lu \n " , ( unsigned long ) amdgpu_bo_size ( abo ) ) ;
2016-12-14 23:31:35 +02:00
DRM_INFO ( " fb depth is %d \n " , fb - > format - > depth ) ;
2015-04-20 16:55:21 -04:00
DRM_INFO ( " pitch is %d \n " , fb - > pitches [ 0 ] ) ;
vga_switcheroo_client_fb_set ( adev - > ddev - > pdev , info ) ;
return 0 ;
2017-02-07 17:16:03 +01:00
out :
2016-09-15 15:06:50 +02:00
if ( abo ) {
2015-04-20 16:55:21 -04:00
}
if ( fb & & ret ) {
2015-11-23 10:32:37 +01:00
drm_gem_object_unreference_unlocked ( gobj ) ;
2015-04-20 16:55:21 -04:00
drm_framebuffer_unregister_private ( fb ) ;
drm_framebuffer_cleanup ( fb ) ;
kfree ( fb ) ;
}
return ret ;
}
void amdgpu_fb_output_poll_changed ( struct amdgpu_device * adev )
{
if ( adev - > mode_info . rfbdev )
drm_fb_helper_hotplug_event ( & adev - > mode_info . rfbdev - > helper ) ;
}
static int amdgpu_fbdev_destroy ( struct drm_device * dev , struct amdgpu_fbdev * rfbdev )
{
struct amdgpu_framebuffer * rfb = & rfbdev - > rfb ;
2015-07-31 16:22:00 +05:30
drm_fb_helper_unregister_fbi ( & rfbdev - > helper ) ;
2015-04-20 16:55:21 -04:00
if ( rfb - > obj ) {
amdgpufb_destroy_pinned_object ( rfb - > obj ) ;
rfb - > obj = NULL ;
}
drm_fb_helper_fini ( & rfbdev - > helper ) ;
drm_framebuffer_unregister_private ( & rfb - > base ) ;
drm_framebuffer_cleanup ( & rfb - > base ) ;
return 0 ;
}
/* DRM fb helper callbacks for amdgpu's fbdev emulation. */
static const struct drm_fb_helper_funcs amdgpu_fb_helper_funcs = {
	.fb_probe = amdgpufb_create,
};
int amdgpu_fbdev_init ( struct amdgpu_device * adev )
{
struct amdgpu_fbdev * rfbdev ;
int bpp_sel = 32 ;
int ret ;
/* don't init fbdev on hw without DCE */
if ( ! adev - > mode_info . mode_config_initialized )
return 0 ;
2016-01-26 00:30:33 -05:00
/* don't init fbdev if there are no connectors */
if ( list_empty ( & adev - > ddev - > mode_config . connector_list ) )
return 0 ;
2015-04-20 16:55:21 -04:00
/* select 8 bpp console on low vram cards */
if ( adev - > mc . real_vram_size < = ( 32 * 1024 * 1024 ) )
bpp_sel = 8 ;
rfbdev = kzalloc ( sizeof ( struct amdgpu_fbdev ) , GFP_KERNEL ) ;
if ( ! rfbdev )
return - ENOMEM ;
rfbdev - > adev = adev ;
adev - > mode_info . rfbdev = rfbdev ;
drm_fb_helper_prepare ( adev - > ddev , & rfbdev - > helper ,
& amdgpu_fb_helper_funcs ) ;
ret = drm_fb_helper_init ( adev - > ddev , & rfbdev - > helper ,
AMDGPUFB_CONN_LIMIT ) ;
if ( ret ) {
kfree ( rfbdev ) ;
return ret ;
}
drm_fb_helper_single_add_all_connectors ( & rfbdev - > helper ) ;
/* disable all the possible outputs/crtcs before entering KMS mode */
drm_helper_disable_unused_functions ( adev - > ddev ) ;
drm_fb_helper_initial_config ( & rfbdev - > helper , bpp_sel ) ;
return 0 ;
}
void amdgpu_fbdev_fini ( struct amdgpu_device * adev )
{
if ( ! adev - > mode_info . rfbdev )
return ;
amdgpu_fbdev_destroy ( adev - > ddev , adev - > mode_info . rfbdev ) ;
kfree ( adev - > mode_info . rfbdev ) ;
adev - > mode_info . rfbdev = NULL ;
}
void amdgpu_fbdev_set_suspend ( struct amdgpu_device * adev , int state )
{
if ( adev - > mode_info . rfbdev )
2015-07-31 16:22:00 +05:30
drm_fb_helper_set_suspend ( & adev - > mode_info . rfbdev - > helper ,
state ) ;
2015-04-20 16:55:21 -04:00
}
int amdgpu_fbdev_total_size ( struct amdgpu_device * adev )
{
struct amdgpu_bo * robj ;
int size = 0 ;
if ( ! adev - > mode_info . rfbdev )
return 0 ;
robj = gem_to_amdgpu_bo ( adev - > mode_info . rfbdev - > rfb . obj ) ;
size + = amdgpu_bo_size ( robj ) ;
return size ;
}
bool amdgpu_fbdev_robj_is_fb ( struct amdgpu_device * adev , struct amdgpu_bo * robj )
{
if ( ! adev - > mode_info . rfbdev )
return false ;
if ( robj = = gem_to_amdgpu_bo ( adev - > mode_info . rfbdev - > rfb . obj ) )
return true ;
return false ;
}
2015-10-02 16:59:34 -04:00
void amdgpu_fbdev_restore_mode ( struct amdgpu_device * adev )
{
2017-05-22 13:11:41 +08:00
struct amdgpu_fbdev * afbdev ;
2015-10-02 16:59:34 -04:00
struct drm_fb_helper * fb_helper ;
int ret ;
2017-05-22 13:11:41 +08:00
if ( ! adev )
return ;
afbdev = adev - > mode_info . rfbdev ;
2015-10-02 16:59:34 -04:00
if ( ! afbdev )
return ;
fb_helper = & afbdev - > helper ;
ret = drm_fb_helper_restore_fbdev_mode_unlocked ( fb_helper ) ;
if ( ret )
DRM_DEBUG ( " failed to restore crtc mode \n " ) ;
}