/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2020-2023 Intel Corporation
 */
#ifndef __IVPU_PM_H__
#define __IVPU_PM_H__

#include <linux/rwsem.h>
#include <linux/types.h>
struct ivpu_device ;
struct ivpu_pm_info {
struct ivpu_device * vdev ;
2023-11-13 18:02:51 +01:00
struct delayed_work job_timeout_work ;
2023-01-17 10:27:23 +01:00
struct work_struct recovery_work ;
2024-01-22 13:09:45 +01:00
struct rw_semaphore reset_lock ;
2023-05-24 09:48:45 +02:00
atomic_t reset_counter ;
2024-01-22 13:09:45 +01:00
atomic_t reset_pending ;
2023-01-17 10:27:23 +01:00
bool is_warmboot ;
u32 suspend_reschedule_counter ;
} ;
2023-09-01 11:49:49 +02:00
void ivpu_pm_init ( struct ivpu_device * vdev ) ;
2023-01-17 10:27:23 +01:00
void ivpu_pm_enable ( struct ivpu_device * vdev ) ;
void ivpu_pm_disable ( struct ivpu_device * vdev ) ;
2023-03-23 13:54:58 +01:00
void ivpu_pm_cancel_recovery ( struct ivpu_device * vdev ) ;
2023-01-17 10:27:23 +01:00
int ivpu_pm_suspend_cb ( struct device * dev ) ;
int ivpu_pm_resume_cb ( struct device * dev ) ;
int ivpu_pm_runtime_suspend_cb ( struct device * dev ) ;
int ivpu_pm_runtime_resume_cb ( struct device * dev ) ;
void ivpu_pm_reset_prepare_cb ( struct pci_dev * pdev ) ;
void ivpu_pm_reset_done_cb ( struct pci_dev * pdev ) ;
int __must_check ivpu_rpm_get ( struct ivpu_device * vdev ) ;
2023-10-20 12:44:58 +02:00
int __must_check ivpu_rpm_get_if_active ( struct ivpu_device * vdev ) ;
2023-01-17 10:27:23 +01:00
void ivpu_rpm_put ( struct ivpu_device * vdev ) ;
2024-01-22 13:09:45 +01:00
void ivpu_pm_trigger_recovery ( struct ivpu_device * vdev , const char * reason ) ;
2023-11-13 18:02:51 +01:00
void ivpu_start_job_timeout_detection ( struct ivpu_device * vdev ) ;
void ivpu_stop_job_timeout_detection ( struct ivpu_device * vdev ) ;
2023-01-17 10:27:23 +01:00
# endif /* __IVPU_PM_H__ */