drm/xe: Use atomic instead of mutex for xe_device_mem_access_ongoing
xe_guc_ct_fast_path() is called from an irq context, and cannot lock the
mutex used by xe_device_mem_access_ongoing(). Fortunately this is easy to
fix, and the atomic guarantees are good enough to ensure
xe->mem_access.hold_rpm is set before the last ref is dropped.

As far as I can tell, the runtime ref in device access should be
killable, but I don't dare to do that yet.

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Acked-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
commit 38c04b47ce
parent 044f0cfb19
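For context on the fix: mutex_lock() may sleep, so it must not be called from the hard-irq context that runs xe_guc_ct_fast_path(), while a single atomic load never blocks and is safe from any context. Below is a minimal userspace analogue of that constraint, using C11 atomics; the names (mem_access_ref, mem_access_ongoing, guc_ct_fast_path) mirror the kernel code but are illustrative stand-ins, not the kernel API.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for xe->mem_access.ref after the patch. */
static atomic_int mem_access_ref;

/*
 * Safe from any context: one atomic load, never sleeps.  The old
 * mutex-based helper could not be used from an interrupt handler.
 */
static bool mem_access_ongoing(void)
{
	return atomic_load(&mem_access_ref) != 0;
}

/* Stand-in for the GuC CT fast path invoked off an interrupt. */
static void guc_ct_fast_path(void)
{
	if (!mem_access_ongoing())
		return;	/* no access in flight: nothing to process */

	printf("processing CT message without blocking\n");
}

int main(void)
{
	atomic_store(&mem_access_ref, 1);	/* pretend an access is ongoing */
	guc_ct_fast_path();
	return 0;
}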
drivers/gpu/drm/xe/xe_device.c
@@ -206,8 +206,6 @@ struct xe_device *xe_device_create(struct pci_dev *pdev,
 	if (err)
 		goto err_put;
 
-	drmm_mutex_init(&xe->drm, &xe->mem_access.lock);
-
 	return xe;
 
 err_put:
@@ -354,25 +352,25 @@ u32 xe_device_ccs_bytes(struct xe_device *xe, u64 size)
 void xe_device_mem_access_get(struct xe_device *xe)
 {
 	bool resumed = xe_pm_runtime_resume_if_suspended(xe);
+	int ref = atomic_inc_return(&xe->mem_access.ref);
 
-	mutex_lock(&xe->mem_access.lock);
-	if (xe->mem_access.ref++ == 0)
+	if (ref == 1)
 		xe->mem_access.hold_rpm = xe_pm_runtime_get_if_active(xe);
-	mutex_unlock(&xe->mem_access.lock);
 
 	/* The usage counter increased if device was immediately resumed */
 	if (resumed)
 		xe_pm_runtime_put(xe);
 
-	XE_WARN_ON(xe->mem_access.ref == S32_MAX);
+	XE_WARN_ON(ref == S32_MAX);
 }
 
 void xe_device_mem_access_put(struct xe_device *xe)
 {
-	mutex_lock(&xe->mem_access.lock);
-	if (--xe->mem_access.ref == 0 && xe->mem_access.hold_rpm)
-		xe_pm_runtime_put(xe);
-	mutex_unlock(&xe->mem_access.lock);
+	bool hold = xe->mem_access.hold_rpm;
+	int ref = atomic_dec_return(&xe->mem_access.ref);
 
-	XE_WARN_ON(xe->mem_access.ref < 0);
+	if (!ref && hold)
+		xe_pm_runtime_put(xe);
+
+	XE_WARN_ON(ref < 0);
 }
drivers/gpu/drm/xe/xe_device.h
@@ -90,20 +90,14 @@ static inline struct xe_force_wake * gt_to_fw(struct xe_gt *gt)
 void xe_device_mem_access_get(struct xe_device *xe);
 void xe_device_mem_access_put(struct xe_device *xe);
 
-static inline void xe_device_assert_mem_access(struct xe_device *xe)
-{
-	XE_WARN_ON(!xe->mem_access.ref);
-}
-
 static inline bool xe_device_mem_access_ongoing(struct xe_device *xe)
 {
-	bool ret;
+	return atomic_read(&xe->mem_access.ref);
+}
 
-	mutex_lock(&xe->mem_access.lock);
-	ret = xe->mem_access.ref;
-	mutex_unlock(&xe->mem_access.lock);
-
-	return ret;
+static inline void xe_device_assert_mem_access(struct xe_device *xe)
+{
+	XE_WARN_ON(!xe_device_mem_access_ongoing(xe));
 }
 
 static inline bool xe_device_in_fault_mode(struct xe_device *xe)
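In the header, xe_device_mem_access_ongoing() shrinks to a single atomic_read(), and xe_device_assert_mem_access() is moved after it so the assertion can be expressed in terms of the lock-free helper instead of touching the ref field directly. A sketch of that layering (userspace analogue, illustrative names only):

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>

static atomic_int mem_access_ref;

/* One atomic load: callable from irq context, no lock ordering to worry about. */
static inline bool mem_access_ongoing(void)
{
	return atomic_load(&mem_access_ref) != 0;
}

/* Built on the helper above, so a single place reads the counter. */
static inline void assert_mem_access(void)
{
	assert(mem_access_ongoing());
}

int main(void)
{
	atomic_store(&mem_access_ref, 1);
	assert_mem_access();	/* passes while an access is marked ongoing */
	return 0;
}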
drivers/gpu/drm/xe/xe_device_types.h
@@ -184,10 +184,8 @@ struct xe_device {
	 * triggering additional actions when they occur.
	 */
 	struct {
-		/** @lock: protect the ref count */
-		struct mutex lock;
 		/** @ref: ref count of memory accesses */
-		s32 ref;
+		atomic_t ref;
 		/** @hold_rpm: need to put rpm ref back at the end */
 		bool hold_rpm;
 	} mem_access;
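Finally, the struct loses its mutex and the plain s32 becomes an atomic_t. The counter needs no explicit setup since a zeroed allocation is already a valid atomic_t, which is why the drmm_mutex_init() call in xe_device_create() is deleted in the first hunk without any replacement. An illustrative userspace copy of the reworked layout (not the authoritative header):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct mem_access {
	/* ref count of memory accesses; atomic so any context may read it */
	atomic_int ref;
	/* need to put the runtime-PM ref back when the count hits zero */
	bool hold_rpm;
};

/* Zero initialization is already valid; there is no lock to set up. */
static struct mem_access mem_access = { .ref = 0, .hold_rpm = false };

int main(void)
{
	printf("initial ref=%d hold_rpm=%d\n",
	       atomic_load(&mem_access.ref), mem_access.hold_rpm);
	return 0;
}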