drm/amdgpu: rework lock handling for flush_tlb v2
Instead of each implementation doing this more or less correctly on its own, move taking the reset lock to a higher level.

v2: fix typo

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
commit e2e3788850
parent 3983c9fd2d
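For orientation before the diff: below is a minimal C sketch of the direct-flush fallback path in amdgpu_gmc_flush_gpu_tlb() as it looks with this patch applied, reconstructed from the first hunk. The ring variable and the omitted job-submission path are assumptions added for readability; treat this as an illustration of the consolidated locking pattern, not as the exact upstream function.

/* Sketch only: reconstructed from the amdgpu_gmc.c hunk in this commit. */
#include "amdgpu.h"        /* struct amdgpu_device, amdgpu_in_reset() */
#include "amdgpu_reset.h"  /* adev->reset_domain->sem */

void amdgpu_gmc_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
			      uint32_t vmhub, uint32_t flush_type)
{
	/* Assumption: "ring" is the buffer-funcs ring whose readiness is
	 * checked below; the surrounding condition is taken from the hunk. */
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;

	if (!adev->mman.buffer_funcs_enabled ||
	    !adev->ib_pool_ready || amdgpu_in_reset(adev) ||
	    !ring->sched.ready) {

		/*
		 * A GPU reset should flush all TLBs anyway, so no need to do
		 * this while one is ongoing.
		 */
		if (!down_read_trylock(&adev->reset_domain->sem))
			return;

		/* The reset lock is now taken here, once, instead of inside
		 * every gmc_vN_0 flush implementation. */
		adev->gmc.gmc_funcs->flush_gpu_tlb(adev, vmid, vmhub,
						   flush_type);
		up_read(&adev->reset_domain->sem);
		return;
	}

	/* Otherwise the flush is submitted as a GPU job (unchanged by this
	 * hunk and omitted here). */
}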
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -596,8 +596,17 @@ void amdgpu_gmc_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
 	    !adev->mman.buffer_funcs_enabled ||
 	    !adev->ib_pool_ready || amdgpu_in_reset(adev) ||
 	    !ring->sched.ready) {
+
+		/*
+		 * A GPU reset should flush all TLBs anyway, so no need to do
+		 * this while one is ongoing.
+		 */
+		if (!down_read_trylock(&adev->reset_domain->sem))
+			return;
+
 		adev->gmc.gmc_funcs->flush_gpu_tlb(adev, vmid, vmhub,
 						   flush_type);
+		up_read(&adev->reset_domain->sem);
 		return;
 	}
 
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -51,8 +51,6 @@
 #include "athub_v2_0.h"
 #include "athub_v2_1.h"
 
-#include "amdgpu_reset.h"
-
 static int gmc_v10_0_ecc_interrupt_state(struct amdgpu_device *adev,
 					 struct amdgpu_irq_src *src,
 					 unsigned int type,
@@ -265,11 +263,9 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
 	 * Directly use kiq to do the vm invalidation instead
 	 */
 	if (adev->gfx.kiq[0].ring.sched.ready && !adev->enable_mes &&
-	    (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
-	    down_read_trylock(&adev->reset_domain->sem)) {
+	    (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) {
 		amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
 						   1 << vmid);
-		up_read(&adev->reset_domain->sem);
 		return;
 	}
 
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -33,7 +33,6 @@
 #include "amdgpu_ucode.h"
 #include "amdgpu_amdkfd.h"
 #include "amdgpu_gem.h"
-#include "amdgpu_reset.h"
 
 #include "bif/bif_4_1_d.h"
 #include "bif/bif_4_1_sh_mask.h"
@@ -430,9 +429,6 @@ static void gmc_v7_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
 	u32 mask = 0x0;
 	int vmid;
 
-	if (!down_read_trylock(&adev->reset_domain->sem))
-		return;
-
 	for (vmid = 1; vmid < 16; vmid++) {
 		u32 tmp = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
 
@@ -443,7 +439,6 @@ static void gmc_v7_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
 
 	WREG32(mmVM_INVALIDATE_REQUEST, mask);
 	RREG32(mmVM_INVALIDATE_RESPONSE);
-	up_read(&adev->reset_domain->sem);
 }
 
 /*
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -31,7 +31,6 @@
 #include "amdgpu_ucode.h"
 #include "amdgpu_amdkfd.h"
 #include "amdgpu_gem.h"
-#include "amdgpu_reset.h"
 
 #include "gmc/gmc_8_1_d.h"
 #include "gmc/gmc_8_1_sh_mask.h"
@@ -620,9 +619,6 @@ static void gmc_v8_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
 	u32 mask = 0x0;
 	int vmid;
 
-	if (!down_read_trylock(&adev->reset_domain->sem))
-		return;
-
 	for (vmid = 1; vmid < 16; vmid++) {
 		u32 tmp = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
 
@@ -633,7 +629,6 @@ static void gmc_v8_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
 
 	WREG32(mmVM_INVALIDATE_REQUEST, mask);
 	RREG32(mmVM_INVALIDATE_RESPONSE);
-	up_read(&adev->reset_domain->sem);
 }
 
 /*
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -65,8 +65,6 @@
 #include "amdgpu_ras.h"
 #include "amdgpu_xgmi.h"
 
-#include "amdgpu_reset.h"
-
 /* add these here since we already include dce12 headers and these are for DCN */
 #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION 0x055d
 #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX 2
@@ -851,8 +849,7 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
 	 * as GFXOFF under bare metal
 	 */
 	if (adev->gfx.kiq[0].ring.sched.ready &&
-	    (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
-	    down_read_trylock(&adev->reset_domain->sem)) {
+	    (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev))) {
 		uint32_t req = hub->vm_inv_eng0_req + hub->eng_distance * eng;
 		uint32_t ack = hub->vm_inv_eng0_ack + hub->eng_distance * eng;
 
@@ -862,7 +859,6 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
 		amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack,
 						   inv_req2, 1 << vmid);
 
-		up_read(&adev->reset_domain->sem);
 		return;
 	}
 