/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <drm/drmP.h>
#include "amdgpu.h"
#include <drm/amdgpu_drm.h>
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"

#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>

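/* ATPX is the ACPI method set used on PowerXpress (hybrid graphics)
 * platforms to control dGPU power and display muxing; it is only
 * relevant when vga_switcheroo support is built in, so the stub
 * below simply reports it as absent.
 */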
#if defined(CONFIG_VGA_SWITCHEROO)
bool amdgpu_has_atpx(void);
#else
static inline bool amdgpu_has_atpx(void) { return false; }
#endif

/**
 * amdgpu_driver_unload_kms - Main unload function for KMS.
 *
 * @dev: drm dev pointer
 *
 * This is the main unload function for KMS (all asics).
 * Returns 0 on success.
 */
int amdgpu_driver_unload_kms(struct drm_device *dev)
{
	struct amdgpu_device *adev = dev->dev_private;

	if (adev == NULL)
		return 0;

	if (adev->rmmio == NULL)
		goto done_free;

	pm_runtime_get_sync(dev->dev);

	amdgpu_acpi_fini(adev);

	amdgpu_device_fini(adev);

done_free:
	kfree(adev);
	dev->dev_private = NULL;
	return 0;
}

/**
 * amdgpu_driver_load_kms - Main load function for KMS.
 *
 * @dev: drm dev pointer
 * @flags: device flags
 *
 * This is the main load function for KMS (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
{
	struct amdgpu_device *adev;
	int r, acpi_status;

	adev = kzalloc(sizeof(struct amdgpu_device), GFP_KERNEL);
	if (adev == NULL) {
		return -ENOMEM;
	}
	dev->dev_private = (void *)adev;

	if ((amdgpu_runtime_pm != 0) &&
	    amdgpu_has_atpx() &&
	    ((flags & AMDGPU_IS_APU) == 0))
		flags |= AMDGPU_IS_PX;

	/* amdgpu_device_init() should report only fatal errors
	 * (e.g. memory allocation, iomapping or memory manager
	 * initialization failures). On success it must have set up
	 * the GPU MC controller properly so that VRAM allocation
	 * is possible.
	 */
	r = amdgpu_device_init(adev, dev, dev->pdev, flags);
	if (r) {
		dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
		goto out;
	}

	/* Call ACPI methods: require modeset init
	 * but failure is not fatal
	 */
	if (!r) {
		acpi_status = amdgpu_acpi_init(adev);
		if (acpi_status)
			dev_dbg(&dev->pdev->dev,
				"Error during ACPI methods call\n");
	}

	if (amdgpu_device_is_px(dev)) {
		pm_runtime_use_autosuspend(dev->dev);
		pm_runtime_set_autosuspend_delay(dev->dev, 5000);
		pm_runtime_set_active(dev->dev);
		pm_runtime_allow(dev->dev);
		pm_runtime_mark_last_busy(dev->dev);
		pm_runtime_put_autosuspend(dev->dev);
	}

out:
	if (r)
		amdgpu_driver_unload_kms(dev);

	return r;
}

/*
 * Userspace get information ioctl
 */
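/*
 * Userspace reaches amdgpu_info_ioctl() through DRM_IOCTL_AMDGPU_INFO
 * with a struct drm_amdgpu_info request describing the query and a
 * buffer to fill. A minimal caller sketch (illustrative only; assumes
 * an already-opened device fd and libdrm's drmCommandWrite()):
 *
 *	struct drm_amdgpu_info request = {};
 *	uint32_t accel_working = 0;
 *
 *	request.query = AMDGPU_INFO_ACCEL_WORKING;
 *	request.return_pointer = (uintptr_t)&accel_working;
 *	request.return_size = sizeof(accel_working);
 *	if (drmCommandWrite(fd, DRM_AMDGPU_INFO, &request,
 *			    sizeof(request)) == 0)
 *		printf("acceleration working: %u\n", accel_working);
 */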
/**
 * amdgpu_info_ioctl - answer a device specific request.
 *
 * @dev: drm device pointer
 * @data: request object
 * @filp: drm filp
 *
 * This function is used to pass device specific parameters to the userspace
 * drivers. Examples include: pci device id, pipeline params, tiling params,
 * etc. (all asics).
 * Returns 0 on success, -EINVAL on failure.
 */
static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_amdgpu_info *info = data;
	struct amdgpu_mode_info *minfo = &adev->mode_info;
	void __user *out = (void __user *)(long)info->return_pointer;
	uint32_t size = info->return_size;
	struct drm_crtc *crtc;
	uint32_t ui32 = 0;
	uint64_t ui64 = 0;
	int i, found;

	if (!info->return_size || !info->return_pointer)
		return -EINVAL;

	switch (info->query) {
	case AMDGPU_INFO_ACCEL_WORKING:
		ui32 = adev->accel_working;
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	case AMDGPU_INFO_CRTC_FROM_ID:
		for (i = 0, found = 0; i < adev->mode_info.num_crtc; i++) {
			crtc = (struct drm_crtc *)minfo->crtcs[i];
			if (crtc && crtc->base.id == info->mode_crtc.id) {
				struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
				ui32 = amdgpu_crtc->crtc_id;
				found = 1;
				break;
			}
		}
		if (!found) {
			DRM_DEBUG_KMS("unknown crtc id %d\n", info->mode_crtc.id);
			return -EINVAL;
		}
		return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
	case AMDGPU_INFO_HW_IP_INFO: {
		struct drm_amdgpu_info_hw_ip ip = {};
		enum amd_ip_block_type type;
		uint32_t ring_mask = 0;
		uint32_t ib_start_alignment = 0;
		uint32_t ib_size_alignment = 0;

		if (info->query_hw_ip.ip_instance >= AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
			return -EINVAL;

		switch (info->query_hw_ip.type) {
		case AMDGPU_HW_IP_GFX:
			type = AMD_IP_BLOCK_TYPE_GFX;
			for (i = 0; i < adev->gfx.num_gfx_rings; i++)
				ring_mask |= ((adev->gfx.gfx_ring[i].ready ? 1 : 0) << i);
			ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
			ib_size_alignment = 8;
			break;
		case AMDGPU_HW_IP_COMPUTE:
			type = AMD_IP_BLOCK_TYPE_GFX;
			for (i = 0; i < adev->gfx.num_compute_rings; i++)
				ring_mask |= ((adev->gfx.compute_ring[i].ready ? 1 : 0) << i);
			ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
			ib_size_alignment = 8;
			break;
		case AMDGPU_HW_IP_DMA:
			type = AMD_IP_BLOCK_TYPE_SDMA;
			ring_mask = adev->sdma[0].ring.ready ? 1 : 0;
			ring_mask |= ((adev->sdma[1].ring.ready ? 1 : 0) << 1);
			ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
			ib_size_alignment = 1;
			break;
		case AMDGPU_HW_IP_UVD:
			type = AMD_IP_BLOCK_TYPE_UVD;
			ring_mask = adev->uvd.ring.ready ? 1 : 0;
			ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
			ib_size_alignment = 8;
			break;
		case AMDGPU_HW_IP_VCE:
			type = AMD_IP_BLOCK_TYPE_VCE;
			for (i = 0; i < AMDGPU_MAX_VCE_RINGS; i++)
				ring_mask |= ((adev->vce.ring[i].ready ? 1 : 0) << i);
			ib_start_alignment = AMDGPU_GPU_PAGE_SIZE;
			ib_size_alignment = 8;
			break;
		default:
			return -EINVAL;
		}

		for (i = 0; i < adev->num_ip_blocks; i++) {
			if (adev->ip_blocks[i].type == type &&
			    adev->ip_block_status[i].valid) {
				ip.hw_ip_version_major = adev->ip_blocks[i].major;
				ip.hw_ip_version_minor = adev->ip_blocks[i].minor;
				ip.capabilities_flags = 0;
				ip.available_rings = ring_mask;
				ip.ib_start_alignment = ib_start_alignment;
				ip.ib_size_alignment = ib_size_alignment;
				break;
			}
		}
		return copy_to_user(out, &ip,
				    min((size_t)size, sizeof(ip))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_HW_IP_COUNT: {
		enum amd_ip_block_type type;
		uint32_t count = 0;

		switch (info->query_hw_ip.type) {
		case AMDGPU_HW_IP_GFX:
			type = AMD_IP_BLOCK_TYPE_GFX;
			break;
		case AMDGPU_HW_IP_COMPUTE:
			type = AMD_IP_BLOCK_TYPE_GFX;
			break;
		case AMDGPU_HW_IP_DMA:
			type = AMD_IP_BLOCK_TYPE_SDMA;
			break;
		case AMDGPU_HW_IP_UVD:
			type = AMD_IP_BLOCK_TYPE_UVD;
			break;
		case AMDGPU_HW_IP_VCE:
			type = AMD_IP_BLOCK_TYPE_VCE;
			break;
		default:
			return -EINVAL;
		}

		for (i = 0; i < adev->num_ip_blocks; i++)
			if (adev->ip_blocks[i].type == type &&
			    adev->ip_block_status[i].valid &&
			    count < AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
				count++;

		return copy_to_user(out, &count, min(size, 4u)) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_TIMESTAMP:
		ui64 = amdgpu_asic_get_gpu_clock_counter(adev);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_FW_VERSION: {
		struct drm_amdgpu_info_firmware fw_info;

		/* We only support one instance of each IP block right now. */
		if (info->query_fw.ip_instance != 0)
			return -EINVAL;

		switch (info->query_fw.fw_type) {
		case AMDGPU_INFO_FW_VCE:
			fw_info.ver = adev->vce.fw_version;
			fw_info.feature = adev->vce.fb_version;
			break;
		case AMDGPU_INFO_FW_UVD:
			fw_info.ver = 0;
			fw_info.feature = 0;
			break;
		case AMDGPU_INFO_FW_GMC:
			fw_info.ver = adev->mc.fw_version;
			fw_info.feature = 0;
			break;
		case AMDGPU_INFO_FW_GFX_ME:
			fw_info.ver = adev->gfx.me_fw_version;
			fw_info.feature = adev->gfx.me_feature_version;
			break;
		case AMDGPU_INFO_FW_GFX_PFP:
			fw_info.ver = adev->gfx.pfp_fw_version;
			fw_info.feature = adev->gfx.pfp_feature_version;
			break;
		case AMDGPU_INFO_FW_GFX_CE:
			fw_info.ver = adev->gfx.ce_fw_version;
			fw_info.feature = adev->gfx.ce_feature_version;
			break;
		case AMDGPU_INFO_FW_GFX_RLC:
			fw_info.ver = adev->gfx.rlc_fw_version;
			fw_info.feature = 0;
			break;
		case AMDGPU_INFO_FW_GFX_MEC:
			if (info->query_fw.index == 0)
				fw_info.ver = adev->gfx.mec_fw_version;
			else if (info->query_fw.index == 1)
				fw_info.ver = adev->gfx.mec2_fw_version;
			else
				return -EINVAL;
			fw_info.feature = 0;
			break;
		case AMDGPU_INFO_FW_SMC:
			fw_info.ver = adev->pm.fw_version;
			fw_info.feature = 0;
			break;
		case AMDGPU_INFO_FW_SDMA:
			if (info->query_fw.index >= 2)
				return -EINVAL;
			fw_info.ver = adev->sdma[info->query_fw.index].fw_version;
			fw_info.feature = 0;
			break;
		default:
			return -EINVAL;
		}
		return copy_to_user(out, &fw_info,
				    min((size_t)size, sizeof(fw_info))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_NUM_BYTES_MOVED:
		ui64 = atomic64_read(&adev->num_bytes_moved);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_VRAM_USAGE:
		ui64 = atomic64_read(&adev->vram_usage);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_VIS_VRAM_USAGE:
		ui64 = atomic64_read(&adev->vram_vis_usage);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_GTT_USAGE:
		ui64 = atomic64_read(&adev->gtt_usage);
		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
	case AMDGPU_INFO_GDS_CONFIG: {
		struct drm_amdgpu_info_gds gds_info;

		memset(&gds_info, 0, sizeof(gds_info));
		gds_info.gds_gfx_partition_size = adev->gds.mem.gfx_partition_size >> AMDGPU_GDS_SHIFT;
		gds_info.compute_partition_size = adev->gds.mem.cs_partition_size >> AMDGPU_GDS_SHIFT;
		gds_info.gds_total_size = adev->gds.mem.total_size >> AMDGPU_GDS_SHIFT;
		gds_info.gws_per_gfx_partition = adev->gds.gws.gfx_partition_size >> AMDGPU_GWS_SHIFT;
		gds_info.gws_per_compute_partition = adev->gds.gws.cs_partition_size >> AMDGPU_GWS_SHIFT;
		gds_info.oa_per_gfx_partition = adev->gds.oa.gfx_partition_size >> AMDGPU_OA_SHIFT;
		gds_info.oa_per_compute_partition = adev->gds.oa.cs_partition_size >> AMDGPU_OA_SHIFT;
		return copy_to_user(out, &gds_info,
				    min((size_t)size, sizeof(gds_info))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_VRAM_GTT: {
		struct drm_amdgpu_info_vram_gtt vram_gtt;

		vram_gtt.vram_size = adev->mc.real_vram_size;
		vram_gtt.vram_cpu_accessible_size = adev->mc.visible_vram_size;
		vram_gtt.vram_cpu_accessible_size -= adev->vram_pin_size;
		vram_gtt.gtt_size = adev->mc.gtt_size;
		vram_gtt.gtt_size -= adev->gart_pin_size;
		return copy_to_user(out, &vram_gtt,
				    min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0;
	}
	case AMDGPU_INFO_READ_MMR_REG: {
		unsigned n, alloc_size = info->read_mmr_reg.count * 4;
		uint32_t *regs;
		unsigned se_num = (info->read_mmr_reg.instance >>
				   AMDGPU_INFO_MMR_SE_INDEX_SHIFT) &
				  AMDGPU_INFO_MMR_SE_INDEX_MASK;
		unsigned sh_num = (info->read_mmr_reg.instance >>
				   AMDGPU_INFO_MMR_SH_INDEX_SHIFT) &
				  AMDGPU_INFO_MMR_SH_INDEX_MASK;
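		/* read_mmr_reg.instance packs the target shader engine (SE)
		 * and shader array (SH) indices into separate bitfields;
		 * the shift/mask pairs above unpack them. */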
		/* set full masks if the userspace set all bits
		 * in the bitfields */
		if (se_num == AMDGPU_INFO_MMR_SE_INDEX_MASK)
			se_num = 0xffffffff;
		if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK)
			sh_num = 0xffffffff;

		regs = kmalloc(alloc_size, GFP_KERNEL);
		if (!regs)
			return -ENOMEM;

		for (i = 0; i < info->read_mmr_reg.count; i++)
			if (amdgpu_asic_read_register(adev, se_num, sh_num,
						      info->read_mmr_reg.dword_offset + i,
						      &regs[i])) {
				DRM_DEBUG_KMS("unallowed offset %#x\n",
					      info->read_mmr_reg.dword_offset + i);
				kfree(regs);
				return -EFAULT;
			}
		n = copy_to_user(out, regs, min(size, alloc_size));
		kfree(regs);
		return n ? -EFAULT : 0;
	}
	case AMDGPU_INFO_DEV_INFO: {
		struct drm_amdgpu_info_device dev_info = {};
		struct amdgpu_cu_info cu_info;

		dev_info.device_id = dev->pdev->device;
		dev_info.chip_rev = adev->rev_id;
		dev_info.external_rev = adev->external_rev_id;
		dev_info.pci_rev = dev->pdev->revision;
		dev_info.family = adev->family;
		dev_info.num_shader_engines = adev->gfx.config.max_shader_engines;
		dev_info.num_shader_arrays_per_engine = adev->gfx.config.max_sh_per_se;
		/* return all clocks in KHz */
		dev_info.gpu_counter_freq = amdgpu_asic_get_xclk(adev) * 10;
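		/* xclk (and the sclk/mclk values below) are kept in 10 kHz
		 * units internally, hence the multiply by 10 to get kHz */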
		if (adev->pm.dpm_enabled) {
			dev_info.max_engine_clock =
				adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk * 10;
			dev_info.max_memory_clock =
				adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk * 10;
		} else {
			dev_info.max_engine_clock = adev->pm.default_sclk * 10;
			dev_info.max_memory_clock = adev->pm.default_mclk * 10;
		}
		dev_info.enabled_rb_pipes_mask = adev->gfx.config.backend_enable_mask;
		dev_info.num_rb_pipes = adev->gfx.config.max_backends_per_se *
			adev->gfx.config.max_shader_engines;
		dev_info.num_hw_gfx_contexts = adev->gfx.config.max_hw_contexts;
		dev_info._pad = 0;
		dev_info.ids_flags = 0;
		if (adev->flags & AMDGPU_IS_APU)
			dev_info.ids_flags |= AMDGPU_IDS_FLAGS_FUSION;
		dev_info.virtual_address_offset = AMDGPU_VA_RESERVED_SIZE;
		dev_info.virtual_address_max = (uint64_t)adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
		dev_info.virtual_address_alignment = max(PAGE_SIZE, 0x10000UL);
		dev_info.pte_fragment_size = (1 << AMDGPU_LOG2_PAGES_PER_FRAG) *
					     AMDGPU_GPU_PAGE_SIZE;
		dev_info.gart_page_size = AMDGPU_GPU_PAGE_SIZE;

		amdgpu_asic_get_cu_info(adev, &cu_info);
		dev_info.cu_active_number = cu_info.number;
		dev_info.cu_ao_mask = cu_info.ao_cu_mask;
		dev_info.ce_ram_size = adev->gfx.ce_ram_size;
		memcpy(&dev_info.cu_bitmap[0], &cu_info.bitmap[0], sizeof(cu_info.bitmap));
		dev_info.vram_type = adev->mc.vram_type;
		dev_info.vram_bit_width = adev->mc.vram_width;
		dev_info.vce_harvest_config = adev->vce.harvest_config;

		return copy_to_user(out, &dev_info,
				    min((size_t)size, sizeof(dev_info))) ? -EFAULT : 0;
	}
	default:
		DRM_DEBUG_KMS("Invalid request %d\n", info->query);
		return -EINVAL;
	}
	return 0;
}

/*
 * Outdated mess for old drm with Xorg being in charge (void function now).
 */
/**
 * amdgpu_driver_lastclose_kms - drm callback for last close
 *
 * @dev: drm dev pointer
 *
 * Switch vga switcheroo state after last close (all asics).
 */
void amdgpu_driver_lastclose_kms(struct drm_device *dev)
{
	vga_switcheroo_process_delayed_switch();
}

/**
 * amdgpu_driver_open_kms - drm callback for open
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device open, init the per-file vm and context manager (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv;
	int r;

	file_priv->driver_priv = NULL;

	r = pm_runtime_get_sync(dev->dev);
	if (r < 0)
		return r;

	fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
	if (unlikely(!fpriv))
		return -ENOMEM;

	r = amdgpu_vm_init(adev, &fpriv->vm);
	if (r)
		goto error_free;

	mutex_init(&fpriv->bo_list_lock);
	idr_init(&fpriv->bo_list_handles);

	/* init context manager */
	mutex_init(&fpriv->ctx_mgr.lock);
	idr_init(&fpriv->ctx_mgr.ctx_handles);
	fpriv->ctx_mgr.adev = adev;

	file_priv->driver_priv = fpriv;

	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
	return 0;

error_free:
	kfree(fpriv);

	return r;
}

/**
 * amdgpu_driver_postclose_kms - drm callback for post close
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device post close, tear down the per-file vm and context manager
 * (all asics).
 */
void amdgpu_driver_postclose_kms(struct drm_device *dev,
				 struct drm_file *file_priv)
{
	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	struct amdgpu_bo_list *list;
	int handle;

	if (!fpriv)
		return;

	amdgpu_vm_fini(adev, &fpriv->vm);

	idr_for_each_entry(&fpriv->bo_list_handles, list, handle)
		amdgpu_bo_list_free(list);

	idr_destroy(&fpriv->bo_list_handles);
	mutex_destroy(&fpriv->bo_list_lock);

	/* release context */
	amdgpu_ctx_fini(fpriv);

	kfree(fpriv);
	file_priv->driver_priv = NULL;
}

/**
 * amdgpu_driver_preclose_kms - drm callback for pre close
 *
 * @dev: drm dev pointer
 * @file_priv: drm file
 *
 * On device pre close, free any UVD and VCE handles still held by the
 * file (all asics).
 */
void amdgpu_driver_preclose_kms(struct drm_device *dev,
				struct drm_file *file_priv)
{
	struct amdgpu_device *adev = dev->dev_private;

	amdgpu_uvd_free_handles(adev, file_priv);
	amdgpu_vce_free_handles(adev, file_priv);
}

/*
 * VBlank related functions.
 */
/**
 * amdgpu_get_vblank_counter_kms - get frame count
 *
 * @dev: drm dev pointer
 * @crtc: crtc to get the frame count from
 *
 * Gets the frame count on the requested crtc (all asics).
 * Returns frame count on success, -EINVAL on failure.
 */
u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, int crtc)
{
	struct amdgpu_device *adev = dev->dev_private;

	if (crtc < 0 || crtc >= adev->mode_info.num_crtc) {
		DRM_ERROR("Invalid crtc %d\n", crtc);
		return -EINVAL;
	}

	return amdgpu_display_vblank_get_counter(adev, crtc);
}

/**
 * amdgpu_enable_vblank_kms - enable vblank interrupt
 *
 * @dev: drm dev pointer
 * @crtc: crtc to enable vblank interrupt for
 *
 * Enable the interrupt on the requested crtc (all asics).
 * Returns 0 on success, -EINVAL on failure.
 */
int amdgpu_enable_vblank_kms(struct drm_device *dev, int crtc)
{
	struct amdgpu_device *adev = dev->dev_private;
	int idx = amdgpu_crtc_idx_to_irq_type(adev, crtc);

	return amdgpu_irq_get(adev, &adev->crtc_irq, idx);
}

/**
 * amdgpu_disable_vblank_kms - disable vblank interrupt
 *
 * @dev: drm dev pointer
 * @crtc: crtc to disable vblank interrupt for
 *
 * Disable the interrupt on the requested crtc (all asics).
 */
void amdgpu_disable_vblank_kms(struct drm_device *dev, int crtc)
{
	struct amdgpu_device *adev = dev->dev_private;
	int idx = amdgpu_crtc_idx_to_irq_type(adev, crtc);

	amdgpu_irq_put(adev, &adev->crtc_irq, idx);
}

/**
 * amdgpu_get_vblank_timestamp_kms - get vblank timestamp
 *
 * @dev: drm dev pointer
 * @crtc: crtc to get the timestamp for
 * @max_error: max error
 * @vblank_time: time value
 * @flags: flags passed to the driver
 *
 * Gets the timestamp on the requested crtc based on the
 * scanout position (all asics).
 * Returns positive status flags on success, negative error on failure.
 */
int amdgpu_get_vblank_timestamp_kms(struct drm_device *dev, int crtc,
				    int *max_error,
				    struct timeval *vblank_time,
				    unsigned flags)
{
	struct drm_crtc *drmcrtc;
	struct amdgpu_device *adev = dev->dev_private;

	if (crtc < 0 || crtc >= dev->num_crtcs) {
		DRM_ERROR("Invalid crtc %d\n", crtc);
		return -EINVAL;
	}

	/* Get associated drm_crtc: */
	drmcrtc = &adev->mode_info.crtcs[crtc]->base;

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error,
						     vblank_time, flags,
						     drmcrtc, &drmcrtc->hwmode);
}

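/* In the flags below, DRM_AUTH restricts an ioctl to clients
 * authenticated against the DRM master, DRM_UNLOCKED dispatches it
 * without taking the legacy global DRM lock, and DRM_RENDER_ALLOW
 * additionally exposes it on render nodes.
 */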
const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_CREATE, amdgpu_gem_create_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_CTX, amdgpu_ctx_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_BO_LIST, amdgpu_bo_list_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	/* KMS */
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_MMAP, amdgpu_gem_mmap_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_WAIT_IDLE, amdgpu_gem_wait_idle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_CS, amdgpu_cs_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_INFO, amdgpu_info_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_CS, amdgpu_cs_wait_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_METADATA, amdgpu_gem_metadata_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_VA, amdgpu_gem_va_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
};
int amdgpu_max_kms_ioctl = ARRAY_SIZE(amdgpu_ioctls_kms);