drm/amdgpu: switch ih handling to two levels (v3)
Newer asics have two levels of irq ids now:

  client id - the IP
  src id - the interrupt src within the IP

v2: integrated Christian's comments.
v3: fix rebase fail in SI and CIK

Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Ken Wang <Qingqing.Wang@amd.com>
Reviewed-by: Ken Wang <Qingqing.Wang@amd.com>
parent 832be4041d
commit d766e6a393
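The core of the change: the flat per-device sources[src_id] table becomes a two-level table indexed by client id first and source id second, with the second level allocated on demand. Below is a minimal userspace sketch of the new lookup; the three constants mirror the patch, everything else is illustrative and not kernel code:

	/* Two-level IH routing sketch: IVs are dispatched by (client_id,
	 * src_id) instead of src_id alone.  Compile with: cc sketch.c */
	#include <stdio.h>
	#include <stdlib.h>

	#define AMDGPU_IH_CLIENTID_LEGACY 0
	#define AMDGPU_IH_CLIENTID_MAX    0x1f
	#define AMDGPU_MAX_IRQ_SRC_ID     0x100

	struct irq_src { const char *name; };

	/* one lazily allocated source table per client, as in amdgpu_irq_add_id() */
	static struct irq_src **client_sources[AMDGPU_IH_CLIENTID_MAX];

	static int add_id(unsigned client_id, unsigned src_id, struct irq_src *src)
	{
		if (client_id >= AMDGPU_IH_CLIENTID_MAX || src_id >= AMDGPU_MAX_IRQ_SRC_ID)
			return -1;
		if (!client_sources[client_id]) {
			client_sources[client_id] =
				calloc(AMDGPU_MAX_IRQ_SRC_ID, sizeof(struct irq_src *));
			if (!client_sources[client_id])
				return -1;
		}
		client_sources[client_id][src_id] = src;
		return 0;
	}

	static void dispatch(unsigned client_id, unsigned src_id)
	{
		if (client_id >= AMDGPU_IH_CLIENTID_MAX || !client_sources[client_id] ||
		    !client_sources[client_id][src_id]) {
			printf("unhandled IV: client %u src %u\n", client_id, src_id);
			return;
		}
		printf("-> %s\n", client_sources[client_id][src_id]->name);
	}

	int main(void)
	{
		struct irq_src thermal = { "thermal" };

		/* pre-vega parts decode everything as the legacy client */
		add_id(AMDGPU_IH_CLIENTID_LEGACY, 230, &thermal);
		dispatch(AMDGPU_IH_CLIENTID_LEGACY, 230);	/* -> thermal */
		dispatch(1, 230);				/* unhandled */
		return 0;
	}

Pre-vega interrupt handlers do not decode a client id from the hardware, so their decode_iv hooks stamp every IV entry with AMDGPU_IH_CLIENTID_LEGACY and all existing registrations move to that client, as the per-file hunks below show.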
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -571,7 +571,9 @@ static const struct amdgpu_irq_src_funcs cgs_irq_funcs = {
 	.process = cgs_process_irq,
 };
 
-static int amdgpu_cgs_add_irq_source(struct cgs_device *cgs_device, unsigned src_id,
+static int amdgpu_cgs_add_irq_source(void *cgs_device,
+				     unsigned client_id,
+				     unsigned src_id,
 				     unsigned num_types,
 				     cgs_irq_source_set_func_t set,
 				     cgs_irq_handler_func_t handler,
@@ -597,7 +599,7 @@ static int amdgpu_cgs_add_irq_source(struct cgs_device *cgs_device, unsigned src
 	irq_params->handler = handler;
 	irq_params->private_data = private_data;
 	source->data = (void *)irq_params;
-	ret = amdgpu_irq_add_id(adev, src_id, source);
+	ret = amdgpu_irq_add_id(adev, client_id, src_id, source);
 	if (ret) {
 		kfree(irq_params);
 		kfree(source);
@@ -606,16 +608,26 @@ static int amdgpu_cgs_add_irq_source(struct cgs_device *cgs_device, unsigned src
 	return ret;
 }
 
-static int amdgpu_cgs_irq_get(struct cgs_device *cgs_device, unsigned src_id, unsigned type)
+static int amdgpu_cgs_irq_get(void *cgs_device, unsigned client_id,
+			      unsigned src_id, unsigned type)
 {
 	CGS_FUNC_ADEV;
-	return amdgpu_irq_get(adev, adev->irq.sources[src_id], type);
+
+	if (!adev->irq.client[client_id].sources)
+		return -EINVAL;
+
+	return amdgpu_irq_get(adev, adev->irq.client[client_id].sources[src_id], type);
 }
 
-static int amdgpu_cgs_irq_put(struct cgs_device *cgs_device, unsigned src_id, unsigned type)
+static int amdgpu_cgs_irq_put(void *cgs_device, unsigned client_id,
+			      unsigned src_id, unsigned type)
 {
 	CGS_FUNC_ADEV;
-	return amdgpu_irq_put(adev, adev->irq.sources[src_id], type);
+
+	if (!adev->irq.client[client_id].sources)
+		return -EINVAL;
+
+	return amdgpu_irq_put(adev, adev->irq.client[client_id].sources[src_id], type);
 }
 
 static int amdgpu_cgs_set_clockgating_state(struct cgs_device *cgs_device,
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
@@ -26,6 +26,10 @@
 
 struct amdgpu_device;
 
+#define AMDGPU_IH_CLIENTID_LEGACY 0
+
+#define AMDGPU_IH_CLIENTID_MAX 0x1f
+
 /*
  * R6xx+ IH ring
  */
@@ -47,10 +51,12 @@ struct amdgpu_ih_ring {
 };
 
 struct amdgpu_iv_entry {
+	unsigned client_id;
 	unsigned src_id;
 	unsigned src_data;
 	unsigned ring_id;
 	unsigned vm_id;
+	unsigned vm_id_src;
 	unsigned pas_id;
 	const uint32_t *iv_entry;
 };
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -89,23 +89,28 @@ static void amdgpu_irq_reset_work_func(struct work_struct *work)
 static void amdgpu_irq_disable_all(struct amdgpu_device *adev)
 {
 	unsigned long irqflags;
-	unsigned i, j;
+	unsigned i, j, k;
 	int r;
 
 	spin_lock_irqsave(&adev->irq.lock, irqflags);
-	for (i = 0; i < AMDGPU_MAX_IRQ_SRC_ID; ++i) {
-		struct amdgpu_irq_src *src = adev->irq.sources[i];
-
-		if (!src || !src->funcs->set || !src->num_types)
+	for (i = 0; i < AMDGPU_IH_CLIENTID_MAX; ++i) {
+		if (!adev->irq.client[i].sources)
 			continue;
 
-		for (j = 0; j < src->num_types; ++j) {
-			atomic_set(&src->enabled_types[j], 0);
-			r = src->funcs->set(adev, src, j,
-					    AMDGPU_IRQ_STATE_DISABLE);
-			if (r)
-				DRM_ERROR("error disabling interrupt (%d)\n",
-					  r);
+		for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
+			struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];
+
+			if (!src || !src->funcs->set || !src->num_types)
+				continue;
+
+			for (k = 0; k < src->num_types; ++k) {
+				atomic_set(&src->enabled_types[k], 0);
+				r = src->funcs->set(adev, src, k,
+						    AMDGPU_IRQ_STATE_DISABLE);
+				if (r)
+					DRM_ERROR("error disabling interrupt (%d)\n",
+						  r);
+			}
 		}
 	}
 	spin_unlock_irqrestore(&adev->irq.lock, irqflags);
@@ -254,7 +259,7 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
  */
 void amdgpu_irq_fini(struct amdgpu_device *adev)
 {
-	unsigned i;
+	unsigned i, j;
 
 	drm_vblank_cleanup(adev->ddev);
 	if (adev->irq.installed) {
@@ -266,19 +271,25 @@ void amdgpu_irq_fini(struct amdgpu_device *adev)
 		cancel_work_sync(&adev->reset_work);
 	}
 
-	for (i = 0; i < AMDGPU_MAX_IRQ_SRC_ID; ++i) {
-		struct amdgpu_irq_src *src = adev->irq.sources[i];
-
-		if (!src)
+	for (i = 0; i < AMDGPU_IH_CLIENTID_MAX; ++i) {
+		if (!adev->irq.client[i].sources)
 			continue;
 
-		kfree(src->enabled_types);
-		src->enabled_types = NULL;
-		if (src->data) {
-			kfree(src->data);
-			kfree(src);
-			adev->irq.sources[i] = NULL;
+		for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
+			struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];
+
+			if (!src)
+				continue;
+
+			kfree(src->enabled_types);
+			src->enabled_types = NULL;
+			if (src->data) {
+				kfree(src->data);
+				kfree(src);
+				adev->irq.client[i].sources[j] = NULL;
+			}
 		}
+		kfree(adev->irq.client[i].sources);
 	}
 }
 
@@ -290,16 +301,28 @@ void amdgpu_irq_fini(struct amdgpu_device *adev)
  * @source: irq source
  *
  */
-int amdgpu_irq_add_id(struct amdgpu_device *adev, unsigned src_id,
+int amdgpu_irq_add_id(struct amdgpu_device *adev,
+		      unsigned client_id, unsigned src_id,
 		      struct amdgpu_irq_src *source)
 {
+	if (client_id >= AMDGPU_IH_CLIENTID_MAX)
+		return -EINVAL;
+
 	if (src_id >= AMDGPU_MAX_IRQ_SRC_ID)
 		return -EINVAL;
 
-	if (adev->irq.sources[src_id] != NULL)
+	if (!source->funcs)
 		return -EINVAL;
 
-	if (!source->funcs)
+	if (!adev->irq.client[client_id].sources) {
+		adev->irq.client[client_id].sources = kcalloc(AMDGPU_MAX_IRQ_SRC_ID,
+							      sizeof(struct amdgpu_irq_src),
+							      GFP_KERNEL);
+		if (!adev->irq.client[client_id].sources)
+			return -ENOMEM;
+	}
+
+	if (adev->irq.client[client_id].sources[src_id] != NULL)
 		return -EINVAL;
 
 	if (source->num_types && !source->enabled_types) {
@@ -313,8 +336,7 @@ int amdgpu_irq_add_id(struct amdgpu_device *adev, unsigned src_id,
 		source->enabled_types = types;
 	}
 
-	adev->irq.sources[src_id] = source;
-
+	adev->irq.client[client_id].sources[src_id] = source;
 	return 0;
 }
 
@@ -329,10 +351,16 @@ int amdgpu_irq_add_id(struct amdgpu_device *adev, unsigned src_id,
 void amdgpu_irq_dispatch(struct amdgpu_device *adev,
 			 struct amdgpu_iv_entry *entry)
 {
+	unsigned client_id = entry->client_id;
 	unsigned src_id = entry->src_id;
 	struct amdgpu_irq_src *src;
 	int r;
 
+	if (client_id >= AMDGPU_IH_CLIENTID_MAX) {
+		DRM_DEBUG("Invalid client_id in IV: %d\n", client_id);
+		return;
+	}
+
 	if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) {
 		DRM_DEBUG("Invalid src_id in IV: %d\n", src_id);
 		return;
@@ -341,7 +369,13 @@ void amdgpu_irq_dispatch(struct amdgpu_device *adev,
 	if (adev->irq.virq[src_id]) {
 		generic_handle_irq(irq_find_mapping(adev->irq.domain, src_id));
 	} else {
-		src = adev->irq.sources[src_id];
+		if (!adev->irq.client[client_id].sources) {
+			DRM_DEBUG("Unregistered interrupt client_id: %d src_id: %d\n",
+				  client_id, src_id);
+			return;
+		}
+
+		src = adev->irq.client[client_id].sources[src_id];
 		if (!src) {
 			DRM_DEBUG("Unhandled interrupt src_id: %d\n", src_id);
 			return;
@@ -385,13 +419,20 @@ int amdgpu_irq_update(struct amdgpu_device *adev,
 
 void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev)
 {
-	int i, j;
-	for (i = 0; i < AMDGPU_MAX_IRQ_SRC_ID; i++) {
-		struct amdgpu_irq_src *src = adev->irq.sources[i];
-		if (!src)
+	int i, j, k;
+
+	for (i = 0; i < AMDGPU_IH_CLIENTID_MAX; ++i) {
+		if (!adev->irq.client[i].sources)
 			continue;
-		for (j = 0; j < src->num_types; j++)
-			amdgpu_irq_update(adev, src, j);
+
+		for (j = 0; j < AMDGPU_MAX_IRQ_SRC_ID; ++j) {
+			struct amdgpu_irq_src *src = adev->irq.client[i].sources[j];
+
+			if (!src)
+				continue;
+			for (k = 0; k < src->num_types; k++)
+				amdgpu_irq_update(adev, src, k);
+		}
 	}
 }
 
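All call sites in this patch register against AMDGPU_IH_CLIENTID_LEGACY, because the pre-vega IH blocks below hardcode that id in their decode_iv hooks. A source behind a hardware-reported client would be registered the same way; a hedged fragment (the client id macro and src id 0 are invented for illustration and are not part of this patch):

	/* hypothetical registration against a non-legacy client */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC /* invented */, 0,
			      &adev->mc.vm_fault);
	if (r)
		return r;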
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
@@ -28,6 +28,7 @@
 #include "amdgpu_ih.h"
 
 #define AMDGPU_MAX_IRQ_SRC_ID 0x100
+#define AMDGPU_MAX_IRQ_CLIENT_ID 0x100
 
 struct amdgpu_device;
 struct amdgpu_iv_entry;
@@ -44,6 +45,10 @@ struct amdgpu_irq_src {
 	void *data;
 };
 
+struct amdgpu_irq_client {
+	struct amdgpu_irq_src **sources;
+};
+
 /* provided by interrupt generating IP blocks */
 struct amdgpu_irq_src_funcs {
 	int (*set)(struct amdgpu_device *adev, struct amdgpu_irq_src *source,
@@ -58,7 +63,7 @@ struct amdgpu_irq {
 	bool installed;
 	spinlock_t lock;
 	/* interrupt sources */
-	struct amdgpu_irq_src *sources[AMDGPU_MAX_IRQ_SRC_ID];
+	struct amdgpu_irq_client client[AMDGPU_IH_CLIENTID_MAX];
 
 	/* status, etc. */
 	bool msi_enabled; /* msi enabled */
@@ -80,7 +85,8 @@ irqreturn_t amdgpu_irq_handler(int irq, void *arg);
 
 int amdgpu_irq_init(struct amdgpu_device *adev);
 void amdgpu_irq_fini(struct amdgpu_device *adev);
-int amdgpu_irq_add_id(struct amdgpu_device *adev, unsigned src_id,
+int amdgpu_irq_add_id(struct amdgpu_device *adev,
+		      unsigned client_id, unsigned src_id,
 		      struct amdgpu_irq_src *source);
 void amdgpu_irq_dispatch(struct amdgpu_device *adev,
 			 struct amdgpu_iv_entry *entry);
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
@@ -6284,11 +6284,13 @@ static int ci_dpm_sw_init(void *handle)
 	int ret;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	ret = amdgpu_irq_add_id(adev, 230, &adev->pm.dpm.thermal.irq);
+	ret = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 230,
+				&adev->pm.dpm.thermal.irq);
 	if (ret)
 		return ret;
 
-	ret = amdgpu_irq_add_id(adev, 231, &adev->pm.dpm.thermal.irq);
+	ret = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 231,
+				&adev->pm.dpm.thermal.irq);
 	if (ret)
 		return ret;
 
--- a/drivers/gpu/drm/amd/amdgpu/cik_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
@@ -248,6 +248,7 @@ static void cik_ih_decode_iv(struct amdgpu_device *adev,
 	dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]);
 	dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]);
 
+	entry->client_id = AMDGPU_IH_CLIENTID_LEGACY;
 	entry->src_id = dw[0] & 0xff;
 	entry->src_data = dw[1] & 0xfffffff;
 	entry->ring_id = dw[2] & 0xff;
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -923,17 +923,20 @@ static int cik_sdma_sw_init(void *handle)
 	}
 
 	/* SDMA trap event */
-	r = amdgpu_irq_add_id(adev, 224, &adev->sdma.trap_irq);
+	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 224,
+			      &adev->sdma.trap_irq);
 	if (r)
 		return r;
 
 	/* SDMA Privileged inst */
-	r = amdgpu_irq_add_id(adev, 241, &adev->sdma.illegal_inst_irq);
+	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 241,
+			      &adev->sdma.illegal_inst_irq);
 	if (r)
 		return r;
 
 	/* SDMA Privileged inst */
-	r = amdgpu_irq_add_id(adev, 247, &adev->sdma.illegal_inst_irq);
+	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 247,
+			      &adev->sdma.illegal_inst_irq);
 	if (r)
 		return r;
 
--- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
@@ -227,6 +227,7 @@ static void cz_ih_decode_iv(struct amdgpu_device *adev,
 	dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]);
 	dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]);
 
+	entry->client_id = AMDGPU_IH_CLIENTID_LEGACY;
 	entry->src_id = dw[0] & 0xff;
 	entry->src_data = dw[1] & 0xfffffff;
 	entry->ring_id = dw[2] & 0xff;
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -2947,19 +2947,19 @@ static int dce_v10_0_sw_init(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
-		r = amdgpu_irq_add_id(adev, i + 1, &adev->crtc_irq);
+		r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
 		if (r)
 			return r;
 	}
 
 	for (i = 8; i < 20; i += 2) {
-		r = amdgpu_irq_add_id(adev, i, &adev->pageflip_irq);
+		r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i, &adev->pageflip_irq);
 		if (r)
 			return r;
 	}
 
 	/* HPD hotplug */
-	r = amdgpu_irq_add_id(adev, 42, &adev->hpd_irq);
+	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 42, &adev->hpd_irq);
 	if (r)
 		return r;
 
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -3007,19 +3007,19 @@ static int dce_v11_0_sw_init(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
-		r = amdgpu_irq_add_id(adev, i + 1, &adev->crtc_irq);
+		r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
 		if (r)
 			return r;
 	}
 
 	for (i = 8; i < 20; i += 2) {
-		r = amdgpu_irq_add_id(adev, i, &adev->pageflip_irq);
+		r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i, &adev->pageflip_irq);
 		if (r)
 			return r;
 	}
 
 	/* HPD hotplug */
-	r = amdgpu_irq_add_id(adev, 42, &adev->hpd_irq);
+	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 42, &adev->hpd_irq);
 	if (r)
 		return r;
 
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -2295,19 +2295,19 @@ static int dce_v6_0_sw_init(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
-		r = amdgpu_irq_add_id(adev, i + 1, &adev->crtc_irq);
+		r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
 		if (r)
 			return r;
 	}
 
 	for (i = 8; i < 20; i += 2) {
-		r = amdgpu_irq_add_id(adev, i, &adev->pageflip_irq);
+		r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i, &adev->pageflip_irq);
 		if (r)
 			return r;
 	}
 
 	/* HPD hotplug */
-	r = amdgpu_irq_add_id(adev, 42, &adev->hpd_irq);
+	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 42, &adev->hpd_irq);
 	if (r)
 		return r;
 
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -2794,19 +2794,19 @@ static int dce_v8_0_sw_init(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
-		r = amdgpu_irq_add_id(adev, i + 1, &adev->crtc_irq);
+		r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i + 1, &adev->crtc_irq);
 		if (r)
 			return r;
 	}
 
 	for (i = 8; i < 20; i += 2) {
-		r = amdgpu_irq_add_id(adev, i, &adev->pageflip_irq);
+		r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, i, &adev->pageflip_irq);
 		if (r)
 			return r;
 	}
 
 	/* HPD hotplug */
-	r = amdgpu_irq_add_id(adev, 42, &adev->hpd_irq);
+	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 42, &adev->hpd_irq);
 	if (r)
 		return r;
 
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -464,7 +464,7 @@ static int dce_virtual_sw_init(void *handle)
 	int r, i;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	r = amdgpu_irq_add_id(adev, 229, &adev->crtc_irq);
+	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 229, &adev->crtc_irq);
 	if (r)
 		return r;
 
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -3242,15 +3242,15 @@ static int gfx_v6_0_sw_init(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	int i, r;
 
-	r = amdgpu_irq_add_id(adev, 181, &adev->gfx.eop_irq);
+	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 181, &adev->gfx.eop_irq);
 	if (r)
 		return r;
 
-	r = amdgpu_irq_add_id(adev, 184, &adev->gfx.priv_reg_irq);
+	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 184, &adev->gfx.priv_reg_irq);
 	if (r)
 		return r;
 
-	r = amdgpu_irq_add_id(adev, 185, &adev->gfx.priv_inst_irq);
+	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 185, &adev->gfx.priv_inst_irq);
 	if (r)
 		return r;
 
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -4669,17 +4669,19 @@ static int gfx_v7_0_sw_init(void *handle)
 	int i, r;
 
 	/* EOP Event */
-	r = amdgpu_irq_add_id(adev, 181, &adev->gfx.eop_irq);
+	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 181, &adev->gfx.eop_irq);
 	if (r)
 		return r;
 
 	/* Privileged reg */
-	r = amdgpu_irq_add_id(adev, 184, &adev->gfx.priv_reg_irq);
+	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 184,
+			      &adev->gfx.priv_reg_irq);
 	if (r)
 		return r;
 
 	/* Privileged inst */
-	r = amdgpu_irq_add_id(adev, 185, &adev->gfx.priv_inst_irq);
+	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 185,
+			      &adev->gfx.priv_inst_irq);
 	if (r)
 		return r;
 
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -2073,22 +2073,24 @@ static int gfx_v8_0_sw_init(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	/* KIQ event */
-	r = amdgpu_irq_add_id(adev, 178, &adev->gfx.kiq.irq);
+	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 178, &adev->gfx.kiq.irq);
 	if (r)
 		return r;
 
 	/* EOP Event */
-	r = amdgpu_irq_add_id(adev, 181, &adev->gfx.eop_irq);
+	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 181, &adev->gfx.eop_irq);
 	if (r)
 		return r;
 
 	/* Privileged reg */
-	r = amdgpu_irq_add_id(adev, 184, &adev->gfx.priv_reg_irq);
+	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 184,
+			      &adev->gfx.priv_reg_irq);
 	if (r)
 		return r;
 
 	/* Privileged inst */
-	r = amdgpu_irq_add_id(adev, 185, &adev->gfx.priv_inst_irq);
+	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 185,
+			      &adev->gfx.priv_inst_irq);
 	if (r)
 		return r;
 
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -823,11 +823,11 @@ static int gmc_v6_0_sw_init(void *handle)
 	int dma_bits;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault);
+	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 146, &adev->mc.vm_fault);
 	if (r)
 		return r;
 
-	r = amdgpu_irq_add_id(adev, 147, &adev->mc.vm_fault);
+	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 147, &adev->mc.vm_fault);
 	if (r)
 		return r;
 
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -969,11 +969,11 @@ static int gmc_v7_0_sw_init(void *handle)
 		adev->mc.vram_type = gmc_v7_0_convert_vram_type(tmp);
 	}
 
-	r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault);
+	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 146, &adev->mc.vm_fault);
 	if (r)
 		return r;
 
-	r = amdgpu_irq_add_id(adev, 147, &adev->mc.vm_fault);
+	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 147, &adev->mc.vm_fault);
 	if (r)
 		return r;
 
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -981,11 +981,11 @@ static int gmc_v8_0_sw_init(void *handle)
 		adev->mc.vram_type = gmc_v8_0_convert_vram_type(tmp);
 	}
 
-	r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault);
+	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 146, &adev->mc.vm_fault);
 	if (r)
 		return r;
 
-	r = amdgpu_irq_add_id(adev, 147, &adev->mc.vm_fault);
+	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 147, &adev->mc.vm_fault);
 	if (r)
 		return r;
 
--- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
@@ -227,6 +227,7 @@ static void iceland_ih_decode_iv(struct amdgpu_device *adev,
 	dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]);
 	dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]);
 
+	entry->client_id = AMDGPU_IH_CLIENTID_LEGACY;
 	entry->src_id = dw[0] & 0xff;
 	entry->src_data = dw[1] & 0xfffffff;
 	entry->ring_id = dw[2] & 0xff;
--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
@@ -2981,11 +2981,13 @@ static int kv_dpm_sw_init(void *handle)
 	int ret;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	ret = amdgpu_irq_add_id(adev, 230, &adev->pm.dpm.thermal.irq);
+	ret = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 230,
+				&adev->pm.dpm.thermal.irq);
 	if (ret)
 		return ret;
 
-	ret = amdgpu_irq_add_id(adev, 231, &adev->pm.dpm.thermal.irq);
+	ret = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 231,
+				&adev->pm.dpm.thermal.irq);
 	if (ret)
 		return ret;
 
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
@@ -566,11 +566,11 @@ int xgpu_vi_mailbox_add_irq_id(struct amdgpu_device *adev)
 {
 	int r;
 
-	r = amdgpu_irq_add_id(adev, 135, &adev->virt.rcv_irq);
+	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 135, &adev->virt.rcv_irq);
 	if (r)
 		return r;
 
-	r = amdgpu_irq_add_id(adev, 138, &adev->virt.ack_irq);
+	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 138, &adev->virt.ack_irq);
 	if (r) {
 		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
 		return r;
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -921,17 +921,20 @@ static int sdma_v2_4_sw_init(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	/* SDMA trap event */
-	r = amdgpu_irq_add_id(adev, 224, &adev->sdma.trap_irq);
+	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 224,
+			      &adev->sdma.trap_irq);
 	if (r)
 		return r;
 
 	/* SDMA Privileged inst */
-	r = amdgpu_irq_add_id(adev, 241, &adev->sdma.illegal_inst_irq);
+	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 241,
+			      &adev->sdma.illegal_inst_irq);
 	if (r)
 		return r;
 
 	/* SDMA Privileged inst */
-	r = amdgpu_irq_add_id(adev, 247, &adev->sdma.illegal_inst_irq);
+	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 247,
+			      &adev->sdma.illegal_inst_irq);
 	if (r)
 		return r;
 
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -1137,17 +1137,20 @@ static int sdma_v3_0_sw_init(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	/* SDMA trap event */
-	r = amdgpu_irq_add_id(adev, 224, &adev->sdma.trap_irq);
+	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 224,
+			      &adev->sdma.trap_irq);
 	if (r)
 		return r;
 
 	/* SDMA Privileged inst */
-	r = amdgpu_irq_add_id(adev, 241, &adev->sdma.illegal_inst_irq);
+	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 241,
+			      &adev->sdma.illegal_inst_irq);
 	if (r)
 		return r;
 
 	/* SDMA Privileged inst */
-	r = amdgpu_irq_add_id(adev, 247, &adev->sdma.illegal_inst_irq);
+	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 247,
+			      &adev->sdma.illegal_inst_irq);
 	if (r)
 		return r;
 
--- a/drivers/gpu/drm/amd/amdgpu/si_dma.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c
@@ -517,12 +517,12 @@ static int si_dma_sw_init(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	/* DMA0 trap event */
-	r = amdgpu_irq_add_id(adev, 224, &adev->sdma.trap_irq);
+	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 224, &adev->sdma.trap_irq);
 	if (r)
 		return r;
 
 	/* DMA1 trap event */
-	r = amdgpu_irq_add_id(adev, 244, &adev->sdma.trap_irq_1);
+	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 244, &adev->sdma.trap_irq_1);
 	if (r)
 		return r;
 
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -7700,11 +7700,11 @@ static int si_dpm_sw_init(void *handle)
 	int ret;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	ret = amdgpu_irq_add_id(adev, 230, &adev->pm.dpm.thermal.irq);
+	ret = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 230, &adev->pm.dpm.thermal.irq);
 	if (ret)
 		return ret;
 
-	ret = amdgpu_irq_add_id(adev, 231, &adev->pm.dpm.thermal.irq);
+	ret = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 231, &adev->pm.dpm.thermal.irq);
 	if (ret)
 		return ret;
 
--- a/drivers/gpu/drm/amd/amdgpu/si_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_ih.c
@@ -129,6 +129,7 @@ static void si_ih_decode_iv(struct amdgpu_device *adev,
 	dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]);
 	dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]);
 
+	entry->client_id = AMDGPU_IH_CLIENTID_LEGACY;
 	entry->src_id = dw[0] & 0xff;
 	entry->src_data = dw[1] & 0xfffffff;
 	entry->ring_id = dw[2] & 0xff;
--- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
@@ -238,6 +238,7 @@ static void tonga_ih_decode_iv(struct amdgpu_device *adev,
 	dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]);
 	dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]);
 
+	entry->client_id = AMDGPU_IH_CLIENTID_LEGACY;
 	entry->src_id = dw[0] & 0xff;
 	entry->src_data = dw[1] & 0xfffffff;
 	entry->ring_id = dw[2] & 0xff;
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -107,7 +107,7 @@ static int uvd_v4_2_sw_init(void *handle)
 	int r;
 
 	/* UVD TRAP */
-	r = amdgpu_irq_add_id(adev, 124, &adev->uvd.irq);
+	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.irq);
 	if (r)
 		return r;
 
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -103,7 +103,7 @@ static int uvd_v5_0_sw_init(void *handle)
 	int r;
 
 	/* UVD TRAP */
-	r = amdgpu_irq_add_id(adev, 124, &adev->uvd.irq);
+	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.irq);
 	if (r)
 		return r;
 
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -106,7 +106,7 @@ static int uvd_v6_0_sw_init(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	/* UVD TRAP */
-	r = amdgpu_irq_add_id(adev, 124, &adev->uvd.irq);
+	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 124, &adev->uvd.irq);
 	if (r)
 		return r;
 
--- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
@@ -430,7 +430,7 @@ static int vce_v2_0_sw_init(void *handle)
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	/* VCE */
-	r = amdgpu_irq_add_id(adev, 167, &adev->vce.irq);
+	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 167, &adev->vce.irq);
 	if (r)
 		return r;
 
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -383,7 +383,7 @@ static int vce_v3_0_sw_init(void *handle)
 	int r, i;
 
 	/* VCE */
-	r = amdgpu_irq_add_id(adev, 167, &adev->vce.irq);
+	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 167, &adev->vce.irq);
 	if (r)
 		return r;
 
--- a/drivers/gpu/drm/amd/include/cgs_linux.h
+++ b/drivers/gpu/drm/amd/include/cgs_linux.h
@@ -66,7 +66,8 @@ typedef int (*cgs_irq_handler_func_t)(void *private_data,
  *
  * Return: 0 on success, -errno otherwise
  */
-typedef int (*cgs_add_irq_source_t)(struct cgs_device *cgs_device, unsigned src_id,
+typedef int (*cgs_add_irq_source_t)(void *cgs_device, unsigned client_id,
+				    unsigned src_id,
 				    unsigned num_types,
 				    cgs_irq_source_set_func_t set,
 				    cgs_irq_handler_func_t handler,
@@ -83,7 +84,7 @@ typedef int (*cgs_add_irq_source_t)(struct cgs_device *cgs_device, unsigned src_
  *
  * Return: 0 on success, -errno otherwise
  */
-typedef int (*cgs_irq_get_t)(struct cgs_device *cgs_device, unsigned src_id, unsigned type);
+typedef int (*cgs_irq_get_t)(void *cgs_device, unsigned client_id, unsigned src_id, unsigned type);
 
 /**
  * cgs_irq_put() - Indicate IRQ source is no longer needed
@@ -98,7 +99,7 @@ typedef int (*cgs_irq_get_t)(struct cgs_device *cgs_device, unsigned src_id, uns
  *
  * Return: 0 on success, -errno otherwise
  */
-typedef int (*cgs_irq_put_t)(struct cgs_device *cgs_device, unsigned src_id, unsigned type);
+typedef int (*cgs_irq_put_t)(void *cgs_device, unsigned client_id, unsigned src_id, unsigned type);
 
 struct cgs_os_ops {
 	/* IRQ handling */
@@ -107,12 +108,12 @@ struct cgs_os_ops {
 	cgs_irq_put_t irq_put;
 };
 
-#define cgs_add_irq_source(dev,src_id,num_types,set,handler,private_data) \
-	CGS_OS_CALL(add_irq_source,dev,src_id,num_types,set,handler, \
+#define cgs_add_irq_source(dev,client_id,src_id,num_types,set,handler,private_data) \
+	CGS_OS_CALL(add_irq_source,dev,client_id,src_id,num_types,set,handler, \
 		    private_data)
-#define cgs_irq_get(dev,src_id,type) \
-	CGS_OS_CALL(irq_get,dev,src_id,type)
-#define cgs_irq_put(dev,src_id,type) \
-	CGS_OS_CALL(irq_put,dev,src_id,type)
+#define cgs_irq_get(dev,client_id,src_id,type) \
+	CGS_OS_CALL(irq_get,dev,client_id,src_id,type)
+#define cgs_irq_put(dev,client_id,src_id,type) \
+	CGS_OS_CALL(irq_put,dev,client_id,src_id,type)
 
 #endif /* _CGS_LINUX_H */
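CGS consumers (the powerplay/SMU code) pick up the same extra argument through the updated macros; this patch only changes the CGS plumbing. A sketch of what a call site looks like afterwards, assuming a legacy-client thermal source; the handler names and the surrounding context are illustrative, not from this patch:

	/* illustrative CGS user: registering a thermal interrupt source */
	ret = cgs_add_irq_source(hwmgr->device, AMDGPU_IH_CLIENTID_LEGACY,
				 230, 2, smu_irq_set /* assumed */,
				 smu_irq_handler /* assumed */, hwmgr);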