/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
/**
 * DOC: Interrupt Handling
 *
 * Interrupts generated within GPU hardware raise interrupt requests that are
 * passed to the amdgpu IRQ handler, which is responsible for detecting the
 * source and type of the interrupt and dispatching the matching handlers. If
 * handling an interrupt requires calling kernel functions that may sleep,
 * processing is dispatched to work handlers.
 *
 * If MSI functionality is not disabled by module parameter then MSI
 * support will be enabled.
 *
 * For GPU interrupt sources that may be driven by another driver, IRQ domain
 * support is used (with mapping between virtual and hardware IRQs).
 */
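
/*
 * Illustrative sketch only (not part of this file's code): an IP block
 * typically provides an &amdgpu_irq_src_funcs with a ->set() callback that
 * programs the hardware enable state and a ->process() callback invoked
 * from amdgpu_irq_dispatch(). The my_block_* names below are hypothetical.
 *
 *   static int my_block_set_irq_state(struct amdgpu_device *adev,
 *                                     struct amdgpu_irq_src *src,
 *                                     unsigned type,
 *                                     enum amdgpu_interrupt_state state)
 *   {
 *           // program the block's interrupt-enable register for 'type'
 *           return 0;
 *   }
 *
 *   static int my_block_process_irq(struct amdgpu_device *adev,
 *                                   struct amdgpu_irq_src *src,
 *                                   struct amdgpu_iv_entry *entry)
 *   {
 *           // handle the decoded IV entry; return 1 to mark it handled
 *           return 1;
 *   }
 *
 *   static const struct amdgpu_irq_src_funcs my_block_irq_funcs = {
 *           .set = my_block_set_irq_state,
 *           .process = my_block_process_irq,
 *   };
 */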
#include <linux/irq.h>
#include <linux/pci.h>

#include <drm/drm_crtc_helper.h>
#include <drm/drm_irq.h>
#include <drm/drm_vblank.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_ih.h"
#include "atom.h"
#include "amdgpu_connectors.h"
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_ras.h"

#include <linux/pm_runtime.h>

#ifdef CONFIG_DRM_AMD_DC
#include "amdgpu_dm_irq.h"
#endif

#define AMDGPU_WAIT_IDLE_TIMEOUT 200
/**
 * amdgpu_hotplug_work_func - work handler for display hotplug event
 *
 * @work: work struct pointer
 *
 * This is the hotplug event work handler (all ASICs).
 * The work gets scheduled from the IRQ handler if there
 * was a hotplug interrupt. It walks through the connector table
 * and calls the hotplug handler for each connector. After this, it sends
 * a DRM hotplug event to alert userspace.
 *
 * This design approach is required in order to defer hotplug event handling
 * from the IRQ handler to a work handler because the hotplug handler has to
 * use mutexes which cannot be locked in an IRQ handler (since &mutex_lock
 * may sleep).
 */
static void amdgpu_hotplug_work_func(struct work_struct *work)
{
        struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
                                                  hotplug_work);
        struct drm_device *dev = adev->ddev;
        struct drm_mode_config *mode_config = &dev->mode_config;
        struct drm_connector *connector;
        struct drm_connector_list_iter iter;

        mutex_lock(&mode_config->mutex);
        drm_connector_list_iter_begin(dev, &iter);
        drm_for_each_connector_iter(connector, &iter)
                amdgpu_connector_hotplug(connector);
        drm_connector_list_iter_end(&iter);
        mutex_unlock(&mode_config->mutex);
        /* Just fire off a uevent and let userspace tell us what to do */
        drm_helper_hpd_irq_event(dev);
}
/**
 * amdgpu_irq_disable_all - disable *all* interrupts
 *
 * @adev: amdgpu device pointer
 *
 * Disable all types of interrupts from all sources.
 */
void amdgpu_irq_disable_all(struct amdgpu_device *adev)
{
        unsigned long irqflags;
        unsigned i, j, k;
        int r;

        spin_lock_irqsave(&adev->irq.lock, irqflags);
        for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
                if (!adev->irq.client[i].sources)
                        continue;

                for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
                        struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];

                        if (!src || !src->funcs->set || !src->num_types)
                                continue;

                        for (k = 0; k < src->num_types; ++k) {
                                atomic_set(&src->enabled_types[k], 0);
                                r = src->funcs->set(adev, src, k,
                                                    AMDGPU_IRQ_STATE_DISABLE);
                                if (r)
                                        DRM_ERROR("error disabling interrupt (%d)\n",
                                                  r);
                        }
                }
        }
        spin_unlock_irqrestore(&adev->irq.lock, irqflags);
}
/**
 * amdgpu_irq_handler - IRQ handler
 *
 * @irq: IRQ number (unused)
 * @arg: pointer to DRM device
 *
 * IRQ handler for amdgpu driver (all ASICs).
 *
 * Returns:
 * result of handling the IRQ, as defined by &irqreturn_t
 */
irqreturn_t amdgpu_irq_handler(int irq, void *arg)
{
        struct drm_device *dev = (struct drm_device *) arg;
        struct amdgpu_device *adev = dev->dev_private;
        irqreturn_t ret;

        ret = amdgpu_ih_process(adev, &adev->irq.ih);
        if (ret == IRQ_HANDLED)
                pm_runtime_mark_last_busy(dev->dev);

        /* For the hardware that cannot enable the bif ring for both
         * ras_controller_irq and ras_err_event_athub_irq ih cookies,
         * the driver has to poll a status register to check whether
         * the interrupt is triggered or not, and properly ack the
         * interrupt if it is there.
         */
        if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__PCIE_BIF)) {
                if (adev->nbio.funcs &&
                    adev->nbio.funcs->handle_ras_controller_intr_no_bifring)
                        adev->nbio.funcs->handle_ras_controller_intr_no_bifring(adev);

                if (adev->nbio.funcs &&
                    adev->nbio.funcs->handle_ras_err_event_athub_intr_no_bifring)
                        adev->nbio.funcs->handle_ras_err_event_athub_intr_no_bifring(adev);
        }

        return ret;
}
/**
 * amdgpu_irq_handle_ih1 - kick off processing for IH1
 *
 * @work: work structure in struct amdgpu_irq
 *
 * Kick off processing of IH ring 1.
 */
static void amdgpu_irq_handle_ih1(struct work_struct *work)
{
        struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
                                                  irq.ih1_work);

        amdgpu_ih_process(adev, &adev->irq.ih1);
}

/**
 * amdgpu_irq_handle_ih2 - kick off processing for IH2
 *
 * @work: work structure in struct amdgpu_irq
 *
 * Kick off processing of IH ring 2.
 */
static void amdgpu_irq_handle_ih2(struct work_struct *work)
{
        struct amdgpu_device *adev = container_of(work, struct amdgpu_device,
                                                  irq.ih2_work);

        amdgpu_ih_process(adev, &adev->irq.ih2);
}
/**
 * amdgpu_msi_ok - check whether MSI functionality is enabled
 *
 * @adev: amdgpu device pointer (unused)
 *
 * Checks whether MSI functionality has been disabled via module parameter
 * (all ASICs).
 *
 * Returns:
 * *true* if MSIs are allowed to be enabled or *false* otherwise
 */
static bool amdgpu_msi_ok(struct amdgpu_device *adev)
{
        if (amdgpu_msi == 1)
                return true;
        else if (amdgpu_msi == 0)
                return false;

        return true;
}
/**
 * amdgpu_irq_init - initialize interrupt handling
 *
 * @adev: amdgpu device pointer
 *
 * Sets up work functions for hotplug and reset interrupts, enables MSI
 * functionality, initializes vblank, hotplug and reset interrupt handling.
 *
 * Returns:
 * 0 on success or error code on failure
 */
int amdgpu_irq_init(struct amdgpu_device *adev)
{
        int r = 0;

        spin_lock_init(&adev->irq.lock);

        /* Enable MSI if not disabled by module parameter */
        adev->irq.msi_enabled = false;

        if (amdgpu_msi_ok(adev)) {
                int nvec = pci_msix_vec_count(adev->pdev);
                unsigned int flags;

                if (nvec <= 0) {
                        flags = PCI_IRQ_MSI;
                } else {
                        flags = PCI_IRQ_MSI | PCI_IRQ_MSIX;
                }
                /* we only need one vector */
                nvec = pci_alloc_irq_vectors(adev->pdev, 1, 1, flags);
                if (nvec > 0) {
                        adev->irq.msi_enabled = true;
                        dev_dbg(adev->dev, "amdgpu: using MSI/MSI-X.\n");
                }
        }

        if (!amdgpu_device_has_dc_support(adev)) {
                if (!adev->enable_virtual_display)
                        /* Disable vblank IRQs aggressively for power-saving */
                        /* XXX: can this be enabled for DC? */
                        adev->ddev->vblank_disable_immediate = true;

                r = drm_vblank_init(adev->ddev, adev->mode_info.num_crtc);
                if (r)
                        return r;

                /* Pre-DCE11 */
                INIT_WORK(&adev->hotplug_work,
                          amdgpu_hotplug_work_func);
        }

        INIT_WORK(&adev->irq.ih1_work, amdgpu_irq_handle_ih1);
        INIT_WORK(&adev->irq.ih2_work, amdgpu_irq_handle_ih2);

        adev->irq.installed = true;
        /* Use vector 0 for MSI-X */
        r = drm_irq_install(adev->ddev, pci_irq_vector(adev->pdev, 0));
        if (r) {
                adev->irq.installed = false;
                if (!amdgpu_device_has_dc_support(adev))
                        flush_work(&adev->hotplug_work);
                return r;
        }
        adev->ddev->max_vblank_count = 0x00ffffff;

        DRM_DEBUG("amdgpu: irq initialized.\n");
        return 0;
}
/**
 * amdgpu_irq_fini - shut down interrupt handling
 *
 * @adev: amdgpu device pointer
 *
 * Tears down work functions for hotplug and reset interrupts, disables MSI
 * functionality, shuts down vblank, hotplug and reset interrupt handling,
 * turns off interrupts from all sources (all ASICs).
 */
void amdgpu_irq_fini(struct amdgpu_device *adev)
{
        unsigned i, j;

        if (adev->irq.installed) {
                drm_irq_uninstall(adev->ddev);
                adev->irq.installed = false;
                if (adev->irq.msi_enabled)
                        pci_free_irq_vectors(adev->pdev);
                if (!amdgpu_device_has_dc_support(adev))
                        flush_work(&adev->hotplug_work);
        }

        for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
                if (!adev->irq.client[i].sources)
                        continue;

                for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
                        struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];

                        if (!src)
                                continue;

                        kfree(src->enabled_types);
                        src->enabled_types = NULL;
                        if (src->data) {
                                kfree(src->data);
                                kfree(src);
                                adev->irq.client[i].sources[j] = NULL;
                        }
                }
                kfree(adev->irq.client[i].sources);
                adev->irq.client[i].sources = NULL;
        }
}
/**
 * amdgpu_irq_add_id - register IRQ source
 *
 * @adev: amdgpu device pointer
 * @client_id: client id
 * @src_id: source id
 * @source: IRQ source pointer
 *
 * Registers IRQ source on a client.
 *
 * Returns:
 * 0 on success or error code otherwise
 */
int amdgpu_irq_add_id(struct amdgpu_device *adev,
                      unsigned client_id, unsigned src_id,
                      struct amdgpu_irq_src *source)
{
        if (client_id >= AMDGPU_IRQ_CLIENTID_MAX)
                return -EINVAL;

        if (src_id >= AMDGPU_MAX_IRQ_SRC_ID)
                return -EINVAL;

        if (!source->funcs)
                return -EINVAL;

        if (!adev->irq.client[client_id].sources) {
                adev->irq.client[client_id].sources =
                        kcalloc(AMDGPU_MAX_IRQ_SRC_ID,
                                sizeof(struct amdgpu_irq_src *),
                                GFP_KERNEL);
                if (!adev->irq.client[client_id].sources)
                        return -ENOMEM;
        }

        if (adev->irq.client[client_id].sources[src_id] != NULL)
                return -EINVAL;

        if (source->num_types && !source->enabled_types) {
                atomic_t *types;

                types = kcalloc(source->num_types, sizeof(atomic_t),
                                GFP_KERNEL);
                if (!types)
                        return -ENOMEM;

                source->enabled_types = types;
        }

        adev->irq.client[client_id].sources[src_id] = source;
        return 0;
}
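
/*
 * Usage sketch (hypothetical names): an IP block registers its source during
 * init, before interrupts for it can be enabled or dispatched.
 *
 *   my_block->irq.num_types = MY_BLOCK_IRQ_LAST;  // hypothetical type count
 *   my_block->irq.funcs = &my_block_irq_funcs;
 *   r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY,
 *                         MY_BLOCK_IRQ_SRC_ID, &my_block->irq);
 *   if (r)
 *           return r;
 *
 * AMDGPU_IRQ_CLIENTID_LEGACY is the client used by pre-SOC15 IP blocks;
 * MY_BLOCK_IRQ_SRC_ID and my_block are illustrative.
 */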
/**
 * amdgpu_irq_dispatch - dispatch IRQ to IP blocks
 *
 * @adev: amdgpu device pointer
 * @ih: interrupt ring instance
 *
 * Dispatches IRQ to IP blocks.
 */
void amdgpu_irq_dispatch(struct amdgpu_device *adev,
                         struct amdgpu_ih_ring *ih)
{
        u32 ring_index = ih->rptr >> 2;
        struct amdgpu_iv_entry entry;
        unsigned client_id, src_id;
        struct amdgpu_irq_src *src;
        bool handled = false;
        int r;

        entry.iv_entry = (const uint32_t *)&ih->ring[ring_index];
        amdgpu_ih_decode_iv(adev, &entry);

        trace_amdgpu_iv(ih - &adev->irq.ih, &entry);

        client_id = entry.client_id;
        src_id = entry.src_id;

        if (client_id >= AMDGPU_IRQ_CLIENTID_MAX) {
                DRM_DEBUG("Invalid client_id in IV: %d\n", client_id);

        } else if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) {
                DRM_DEBUG("Invalid src_id in IV: %d\n", src_id);

        } else if (adev->irq.virq[src_id]) {
                generic_handle_irq(irq_find_mapping(adev->irq.domain, src_id));

        } else if (!adev->irq.client[client_id].sources) {
                DRM_DEBUG("Unregistered interrupt client_id: %d src_id: %d\n",
                          client_id, src_id);

        } else if ((src = adev->irq.client[client_id].sources[src_id])) {
                r = src->funcs->process(adev, src, &entry);
                if (r < 0)
                        DRM_ERROR("error processing interrupt (%d)\n", r);
                else if (r)
                        handled = true;

        } else {
                DRM_DEBUG("Unhandled interrupt src_id: %d\n", src_id);
        }

        /* Send it to amdkfd as well if it isn't already handled */
        if (!handled)
                amdgpu_amdkfd_interrupt(adev, entry.iv_entry);
}
/**
 * amdgpu_irq_update - update hardware interrupt state
 *
 * @adev: amdgpu device pointer
 * @src: interrupt source pointer
 * @type: type of interrupt
 *
 * Updates interrupt state for the specific source (all ASICs).
 *
 * Returns:
 * 0 on success or error code otherwise
 */
int amdgpu_irq_update(struct amdgpu_device *adev,
                      struct amdgpu_irq_src *src, unsigned type)
{
        unsigned long irqflags;
        enum amdgpu_interrupt_state state;
        int r;

        spin_lock_irqsave(&adev->irq.lock, irqflags);

        /* We need to determine after taking the lock, otherwise
           we might disable just enabled interrupts again */
        if (amdgpu_irq_enabled(adev, src, type))
                state = AMDGPU_IRQ_STATE_ENABLE;
        else
                state = AMDGPU_IRQ_STATE_DISABLE;

        r = src->funcs->set(adev, src, type, state);
        spin_unlock_irqrestore(&adev->irq.lock, irqflags);
        return r;
}
/**
 * amdgpu_irq_gpu_reset_resume_helper - update interrupt states on all sources
 *
 * @adev: amdgpu device pointer
 *
 * Updates state of all types of interrupts on all sources on resume after
 * reset.
 */
void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev)
{
        int i, j, k;

        for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
                if (!adev->irq.client[i].sources)
                        continue;

                for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
                        struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];

                        if (!src)
                                continue;

                        for (k = 0; k < src->num_types; k++)
                                amdgpu_irq_update(adev, src, k);
                }
        }
}
/**
 * amdgpu_irq_get - enable interrupt
 *
 * @adev: amdgpu device pointer
 * @src: interrupt source pointer
 * @type: type of interrupt
 *
 * Enables specified type of interrupt on the specified source (all ASICs).
 *
 * Returns:
 * 0 on success or error code otherwise
 */
int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
                   unsigned type)
{
        if (!adev->ddev->irq_enabled)
                return -ENOENT;

        if (type >= src->num_types)
                return -EINVAL;

        if (!src->enabled_types || !src->funcs->set)
                return -EINVAL;

        if (atomic_inc_return(&src->enabled_types[type]) == 1)
                return amdgpu_irq_update(adev, src, type);

        return 0;
}
/**
 * amdgpu_irq_put - disable interrupt
 *
 * @adev: amdgpu device pointer
 * @src: interrupt source pointer
 * @type: type of interrupt
 *
 * Disables specified type of interrupt on the specified source (all ASICs).
 *
 * Returns:
 * 0 on success or error code otherwise
 */
int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
                   unsigned type)
{
        if (!adev->ddev->irq_enabled)
                return -ENOENT;

        if (type >= src->num_types)
                return -EINVAL;

        if (!src->enabled_types || !src->funcs->set)
                return -EINVAL;

        if (atomic_dec_and_test(&src->enabled_types[type]))
                return amdgpu_irq_update(adev, src, type);

        return 0;
}
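
/*
 * Sketch of the reference-counted pairing: every amdgpu_irq_get() must be
 * balanced by an amdgpu_irq_put(). The hardware state is only reprogrammed
 * on the 0 -> 1 and 1 -> 0 transitions of the per-type counter, so nested
 * get/put pairs are cheap. MY_IRQ_TYPE and my_block are illustrative.
 *
 *   r = amdgpu_irq_get(adev, &my_block->irq, MY_IRQ_TYPE);  // first get enables HW
 *   if (r)
 *           return r;
 *   ...
 *   amdgpu_irq_put(adev, &my_block->irq, MY_IRQ_TYPE);      // last put disables HW
 */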
/**
 * amdgpu_irq_enabled - check whether interrupt is enabled or not
 *
 * @adev: amdgpu device pointer
 * @src: interrupt source pointer
 * @type: type of interrupt
 *
 * Checks whether the given type of interrupt is enabled on the given source.
 *
 * Returns:
 * *true* if interrupt is enabled, *false* if interrupt is disabled or on
 * invalid parameters
 */
bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
                        unsigned type)
{
        if (!adev->ddev->irq_enabled)
                return false;

        if (type >= src->num_types)
                return false;

        if (!src->enabled_types || !src->funcs->set)
                return false;

        return !!atomic_read(&src->enabled_types[type]);
}
/* XXX: Generic IRQ handling */
static void amdgpu_irq_mask(struct irq_data *irqd)
{
        /* XXX */
}

static void amdgpu_irq_unmask(struct irq_data *irqd)
{
        /* XXX */
}

/* amdgpu hardware interrupt chip descriptor */
static struct irq_chip amdgpu_irq_chip = {
        .name = "amdgpu-ih",
        .irq_mask = amdgpu_irq_mask,
        .irq_unmask = amdgpu_irq_unmask,
};

/**
 * amdgpu_irqdomain_map - create mapping between virtual and hardware IRQ numbers
 *
 * @d: amdgpu IRQ domain pointer (unused)
 * @irq: virtual IRQ number
 * @hwirq: hardware IRQ number
 *
 * Current implementation assigns simple interrupt handler to the given virtual
 * IRQ.
 *
 * Returns:
 * 0 on success or error code otherwise
 */
static int amdgpu_irqdomain_map(struct irq_domain *d,
                                unsigned int irq, irq_hw_number_t hwirq)
{
        if (hwirq >= AMDGPU_MAX_IRQ_SRC_ID)
                return -EPERM;

        irq_set_chip_and_handler(irq,
                                 &amdgpu_irq_chip, handle_simple_irq);
        return 0;
}

/* Implementation of methods for amdgpu IRQ domain */
static const struct irq_domain_ops amdgpu_hw_irqdomain_ops = {
        .map = amdgpu_irqdomain_map,
};
/**
 * amdgpu_irq_add_domain - create a linear IRQ domain
 *
 * @adev: amdgpu device pointer
 *
 * Creates an IRQ domain for GPU interrupt sources
 * that may be driven by another driver (e.g., ACP).
 *
 * Returns:
 * 0 on success or error code otherwise
 */
int amdgpu_irq_add_domain(struct amdgpu_device *adev)
{
        adev->irq.domain = irq_domain_add_linear(NULL, AMDGPU_MAX_IRQ_SRC_ID,
                                                 &amdgpu_hw_irqdomain_ops, adev);
        if (!adev->irq.domain) {
                DRM_ERROR("GPU irq add domain failed\n");
                return -ENODEV;
        }

        return 0;
}

/**
 * amdgpu_irq_remove_domain - remove the IRQ domain
 *
 * @adev: amdgpu device pointer
 *
 * Removes the IRQ domain for GPU interrupt sources
 * that may be driven by another driver (e.g., ACP).
 */
void amdgpu_irq_remove_domain(struct amdgpu_device *adev)
{
        if (adev->irq.domain) {
                irq_domain_remove(adev->irq.domain);
                adev->irq.domain = NULL;
        }
}
/**
 * amdgpu_irq_create_mapping - create mapping between a domain IRQ and a Linux IRQ
 *
 * @adev: amdgpu device pointer
 * @src_id: IH source id
 *
 * Creates mapping between a domain IRQ (GPU IH src id) and a Linux IRQ.
 * Use this for components that generate a GPU interrupt, but are driven
 * by a different driver (e.g., ACP).
 *
 * Returns:
 * Linux IRQ
 */
unsigned amdgpu_irq_create_mapping(struct amdgpu_device *adev, unsigned src_id)
{
        adev->irq.virq[src_id] = irq_create_mapping(adev->irq.domain, src_id);

        return adev->irq.virq[src_id];
}
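
/*
 * Usage sketch for an externally driven source such as ACP (names and exact
 * wiring are illustrative): map the IH src id to a Linux IRQ and hand that
 * number to the other driver so it can request_irq() on it. Matching IH
 * entries are then routed to it by amdgpu_irq_dispatch() via
 * generic_handle_irq().
 *
 *   unsigned virq = amdgpu_irq_create_mapping(adev, my_acp_src_id);
 *   if (!virq)
 *           return -EINVAL;
 *   // expose 'virq' to the consuming driver, e.g. as a platform resource
 */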