drm/amdgpu: move the VRAM lost counter per context
Track the VRAM lost counter per context instead of per device, and return -ECANCELED instead of -ENODEV.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Nicolai Hähnle <nicolai.haehnle@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
This commit is contained in:
parent
14e47f93c5
commit
e55f2b646d
@@ -732,10 +732,11 @@ struct amdgpu_ctx {
|
||||
struct amdgpu_device *adev;
|
||||
struct amdgpu_queue_mgr queue_mgr;
|
||||
unsigned reset_counter;
|
||||
uint32_t vram_lost_counter;
|
||||
spinlock_t ring_lock;
|
||||
struct dma_fence **fences;
|
||||
struct amdgpu_ctx_ring rings[AMDGPU_MAX_RINGS];
|
||||
bool preamble_presented;
|
||||
enum amd_sched_priority init_priority;
|
||||
enum amd_sched_priority override_priority;
|
||||
struct mutex lock;
|
||||
@@ -778,7 +779,6 @@ struct amdgpu_fpriv {
|
||||
struct mutex bo_list_lock;
|
||||
struct idr bo_list_handles;
|
||||
struct amdgpu_ctx_mgr ctx_mgr;
|
||||
u32 vram_lost_counter;
|
||||
};
|
||||
|
||||
/*
|
||||
@@ -1860,8 +1860,6 @@ static inline bool amdgpu_has_atpx(void) { return false; }
|
||||
extern const struct drm_ioctl_desc amdgpu_ioctls_kms[];
|
||||
extern const int amdgpu_max_kms_ioctl;
|
||||
|
||||
bool amdgpu_kms_vram_lost(struct amdgpu_device *adev,
|
||||
struct amdgpu_fpriv *fpriv);
|
||||
int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags);
|
||||
void amdgpu_driver_unload_kms(struct drm_device *dev);
|
||||
void amdgpu_driver_lastclose_kms(struct drm_device *dev);
|
||||
|
@@ -172,7 +172,11 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
|
||||
if (ret)
|
||||
goto free_all_kdata;
|
||||
|
||||
p->job->vram_lost_counter = fpriv->vram_lost_counter;
|
||||
p->job->vram_lost_counter = atomic_read(&p->adev->vram_lost_counter);
|
||||
if (p->ctx->vram_lost_counter != p->job->vram_lost_counter) {
|
||||
ret = -ECANCELED;
|
||||
goto free_all_kdata;
|
||||
}
|
||||
|
||||
if (p->uf_entry.robj)
|
||||
p->job->uf_addr = uf_offset;
|
||||
@@ -1205,7 +1209,6 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
|
||||
int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
|
||||
{
|
||||
struct amdgpu_device *adev = dev->dev_private;
|
||||
struct amdgpu_fpriv *fpriv = filp->driver_priv;
|
||||
union drm_amdgpu_cs *cs = data;
|
||||
struct amdgpu_cs_parser parser = {};
|
||||
bool reserved_buffers = false;
|
||||
@@ -1213,8 +1216,6 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
|
||||
|
||||
if (!adev->accel_working)
|
||||
return -EBUSY;
|
||||
if (amdgpu_kms_vram_lost(adev, fpriv))
|
||||
return -ENODEV;
|
||||
|
||||
parser.adev = adev;
|
||||
parser.filp = filp;
|
||||
|
@@ -75,6 +75,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
|
||||
}
|
||||
|
||||
ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);
|
||||
ctx->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
|
||||
ctx->init_priority = priority;
|
||||
ctx->override_priority = AMD_SCHED_PRIORITY_UNSET;
|
||||
|
||||
|
@@ -789,21 +789,6 @@ void amdgpu_driver_lastclose_kms(struct drm_device *dev)
|
||||
vga_switcheroo_process_delayed_switch();
|
||||
}
|
||||
|
||||
/**
|
||||
* amdgpu_kms_vram_lost - check if VRAM was lost for this client
|
||||
*
|
||||
* @adev: amdgpu device
|
||||
* @fpriv: client private
|
||||
*
|
||||
* Check if all CS is blocked for the client because of lost VRAM
|
||||
*/
|
||||
bool amdgpu_kms_vram_lost(struct amdgpu_device *adev,
|
||||
struct amdgpu_fpriv *fpriv)
|
||||
{
|
||||
return fpriv->vram_lost_counter !=
|
||||
atomic_read(&adev->vram_lost_counter);
|
||||
}
|
||||
|
||||
/**
|
||||
* amdgpu_driver_open_kms - drm callback for open
|
||||
*
|
||||
@@ -860,7 +845,6 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
|
||||
|
||||
amdgpu_ctx_mgr_init(&fpriv->ctx_mgr);
|
||||
|
||||
fpriv->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
|
||||
file_priv->driver_priv = fpriv;
|
||||
|
||||
out_suspend:
|
||||
|
Loading…
x
Reference in New Issue
Block a user