// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2023 Intel Corporation
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>

#include <drm/drm_accel.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_prime.h>

#include "vpu_boot_api.h"
#include "ivpu_debugfs.h"
#include "ivpu_drv.h"
#include "ivpu_fw.h"
#include "ivpu_fw_log.h"
#include "ivpu_gem.h"
#include "ivpu_hw.h"
#include "ivpu_ipc.h"
#include "ivpu_job.h"
#include "ivpu_jsm_msg.h"
#include "ivpu_mmu.h"
#include "ivpu_mmu_context.h"
#include "ivpu_pm.h"

#ifndef DRIVER_VERSION_STR
#define DRIVER_VERSION_STR __stringify(DRM_IVPU_DRIVER_MAJOR) "." \
			   __stringify(DRM_IVPU_DRIVER_MINOR) "."
#endif

static struct lock_class_key submitted_jobs_xa_lock_class_key;

int ivpu_dbg_mask;
module_param_named(dbg_mask, ivpu_dbg_mask, int, 0644);
MODULE_PARM_DESC(dbg_mask, "Driver debug mask. See IVPU_DBG_* macros.");

int ivpu_test_mode;
module_param_named_unsafe(test_mode, ivpu_test_mode, int, 0644);
MODULE_PARM_DESC(test_mode, "Test mode mask. See IVPU_TEST_MODE_* macros.");

u8 ivpu_pll_min_ratio;
module_param_named(pll_min_ratio, ivpu_pll_min_ratio, byte, 0644);
MODULE_PARM_DESC(pll_min_ratio, "Minimum PLL ratio used to set VPU frequency");

u8 ivpu_pll_max_ratio = U8_MAX;
module_param_named(pll_max_ratio, ivpu_pll_max_ratio, byte, 0644);
MODULE_PARM_DESC(pll_max_ratio, "Maximum PLL ratio used to set VPU frequency");

bool ivpu_disable_mmu_cont_pages;
module_param_named(disable_mmu_cont_pages, ivpu_disable_mmu_cont_pages, bool, 0644);
MODULE_PARM_DESC(disable_mmu_cont_pages, "Disable MMU contiguous pages optimization");

struct ivpu_file_priv *ivpu_file_priv_get(struct ivpu_file_priv *file_priv)
{
	struct ivpu_device *vdev = file_priv->vdev;

	kref_get(&file_priv->ref);

	ivpu_dbg(vdev, KREF, "file_priv get: ctx %u refcount %u\n",
		 file_priv->ctx.id, kref_read(&file_priv->ref));

	return file_priv;
}
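
/* Detach a file context from the device: release its command queues, tell the
 * firmware the context is gone, unbind its BOs and tear down its MMU context.
 * Called with context_list_lock held, on file close or device removal. */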
static void file_priv_unbind(struct ivpu_device *vdev, struct ivpu_file_priv *file_priv)
{
	mutex_lock(&file_priv->lock);
	if (file_priv->bound) {
		ivpu_dbg(vdev, FILE, "file_priv unbind: ctx %u\n", file_priv->ctx.id);

		ivpu_cmdq_release_all_locked(file_priv);
		ivpu_jsm_context_release(vdev, file_priv->ctx.id);
		ivpu_bo_unbind_all_bos_from_context(vdev, &file_priv->ctx);
		ivpu_mmu_user_context_fini(vdev, &file_priv->ctx);
		file_priv->bound = false;
		drm_WARN_ON(&vdev->drm, !xa_erase_irq(&vdev->context_xa, file_priv->ctx.id));
	}
	mutex_unlock(&file_priv->lock);
}

static void file_priv_release(struct kref *ref)
{
	struct ivpu_file_priv *file_priv = container_of(ref, struct ivpu_file_priv, ref);
	struct ivpu_device *vdev = file_priv->vdev;

	ivpu_dbg(vdev, FILE, "file_priv release: ctx %u bound %d\n",
		 file_priv->ctx.id, (bool)file_priv->bound);

	pm_runtime_get_sync(vdev->drm.dev);
	mutex_lock(&vdev->context_list_lock);
	file_priv_unbind(vdev, file_priv);
	mutex_unlock(&vdev->context_list_lock);
	pm_runtime_put_autosuspend(vdev->drm.dev);

	mutex_destroy(&file_priv->lock);
	kfree(file_priv);
}

void ivpu_file_priv_put(struct ivpu_file_priv **link)
{
	struct ivpu_file_priv *file_priv = *link;
	struct ivpu_device *vdev = file_priv->vdev;

	drm_WARN_ON(&vdev->drm, !file_priv);

	ivpu_dbg(vdev, KREF, "file_priv put: ctx %u refcount %u\n",
		 file_priv->ctx.id, kref_read(&file_priv->ref));

	*link = NULL;
	kref_put(&file_priv->ref, file_priv_release);
}

static int ivpu_get_capabilities(struct ivpu_device *vdev, struct drm_ivpu_param *args)
{
	switch (args->index) {
	case DRM_IVPU_CAP_METRIC_STREAMER:
		args->value = 0;
		break;
	case DRM_IVPU_CAP_DMA_MEMORY_RANGE:
		args->value = 1;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
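
/* Report the current PLL frequency only if the NPU is already powered up:
 * ivpu_rpm_get_if_active() returns 0 when the device is suspended, in which
 * case the clock rate is reported as 0 and no runtime PM put is needed. */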
static int ivpu_get_core_clock_rate(struct ivpu_device *vdev, u64 *clk_rate)
{
	int ret;

	ret = ivpu_rpm_get_if_active(vdev);
	if (ret < 0)
		return ret;

	*clk_rate = ret ? ivpu_hw_reg_pll_freq_get(vdev) : 0;

	if (ret)
		ivpu_rpm_put(vdev);

	return 0;
}
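
/* drm_dev_enter()/drm_dev_exit() protect the ioctl against a concurrent
 * drm_dev_unplug() from ivpu_remove(); an unplugged device returns -ENODEV. */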
static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct ivpu_file_priv *file_priv = file->driver_priv;
	struct ivpu_device *vdev = file_priv->vdev;
	struct pci_dev *pdev = to_pci_dev(vdev->drm.dev);
	struct drm_ivpu_param *args = data;
	int ret = 0;
	int idx;

	if (!drm_dev_enter(dev, &idx))
		return -ENODEV;

	switch (args->param) {
	case DRM_IVPU_PARAM_DEVICE_ID:
		args->value = pdev->device;
		break;
	case DRM_IVPU_PARAM_DEVICE_REVISION:
		args->value = pdev->revision;
		break;
	case DRM_IVPU_PARAM_PLATFORM_TYPE:
		args->value = vdev->platform;
		break;
	case DRM_IVPU_PARAM_CORE_CLOCK_RATE:
		ret = ivpu_get_core_clock_rate(vdev, &args->value);
		break;
	case DRM_IVPU_PARAM_NUM_CONTEXTS:
		args->value = ivpu_get_context_count(vdev);
		break;
	case DRM_IVPU_PARAM_CONTEXT_BASE_ADDRESS:
		args->value = vdev->hw->ranges.user.start;
		break;
	case DRM_IVPU_PARAM_CONTEXT_ID:
		args->value = file_priv->ctx.id;
		break;
	case DRM_IVPU_PARAM_FW_API_VERSION:
		if (args->index < VPU_FW_API_VER_NUM) {
			struct vpu_firmware_header *fw_hdr;

			fw_hdr = (struct vpu_firmware_header *)vdev->fw->file->data;
			args->value = fw_hdr->api_version[args->index];
		} else {
			ret = -EINVAL;
		}
		break;
	case DRM_IVPU_PARAM_ENGINE_HEARTBEAT:
		ret = ivpu_jsm_get_heartbeat(vdev, args->index, &args->value);
		break;
	case DRM_IVPU_PARAM_UNIQUE_INFERENCE_ID:
		args->value = (u64)atomic64_inc_return(&vdev->unique_id_counter);
		break;
	case DRM_IVPU_PARAM_TILE_CONFIG:
		args->value = vdev->hw->tile_fuse;
		break;
	case DRM_IVPU_PARAM_SKU:
		args->value = vdev->hw->sku;
		break;
	case DRM_IVPU_PARAM_CAPABILITIES:
		ret = ivpu_get_capabilities(vdev, args);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	drm_dev_exit(idx);
	return ret;
}

static int ivpu_set_param_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_ivpu_param *args = data;
	int ret = 0;

	switch (args->param) {
	default:
		ret = -EINVAL;
	}

	return ret;
}
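
/* Each open of the accel node gets its own file context: a context id
 * allocated from context_xa (bounded by context_xa_limit) and a dedicated
 * user MMU context, both created under context_list_lock. */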
static int ivpu_open(struct drm_device *dev, struct drm_file *file)
{
	struct ivpu_device *vdev = to_ivpu_device(dev);
	struct ivpu_file_priv *file_priv;
	u32 ctx_id;
	int idx, ret;

	if (!drm_dev_enter(dev, &idx))
		return -ENODEV;

	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
	if (!file_priv) {
		ret = -ENOMEM;
		goto err_dev_exit;
	}

	file_priv->vdev = vdev;
	file_priv->bound = true;
	kref_init(&file_priv->ref);
	mutex_init(&file_priv->lock);

	mutex_lock(&vdev->context_list_lock);

	ret = xa_alloc_irq(&vdev->context_xa, &ctx_id, file_priv,
			   vdev->context_xa_limit, GFP_KERNEL);
	if (ret) {
		ivpu_err(vdev, "Failed to allocate context id: %d\n", ret);
		goto err_unlock;
	}

	ret = ivpu_mmu_user_context_init(vdev, &file_priv->ctx, ctx_id);
	if (ret)
		goto err_xa_erase;

	mutex_unlock(&vdev->context_list_lock);
	drm_dev_exit(idx);

	file->driver_priv = file_priv;

	ivpu_dbg(vdev, FILE, "file_priv create: ctx %u process %s pid %d\n",
		 ctx_id, current->comm, task_pid_nr(current));

	return 0;

err_xa_erase:
	xa_erase_irq(&vdev->context_xa, ctx_id);
err_unlock:
	mutex_unlock(&vdev->context_list_lock);
	mutex_destroy(&file_priv->lock);
	kfree(file_priv);
err_dev_exit:
	drm_dev_exit(idx);
	return ret;
}

static void ivpu_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct ivpu_file_priv *file_priv = file->driver_priv;
	struct ivpu_device *vdev = to_ivpu_device(dev);

	ivpu_dbg(vdev, FILE, "file_priv close: ctx %u process %s pid %d\n",
		 file_priv->ctx.id, current->comm, task_pid_nr(current));

	ivpu_file_priv_put(&file_priv);
}

static const struct drm_ioctl_desc ivpu_drm_ioctls[] = {
	DRM_IOCTL_DEF_DRV(IVPU_GET_PARAM, ivpu_get_param_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_SET_PARAM, ivpu_set_param_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_BO_CREATE, ivpu_bo_create_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_BO_INFO, ivpu_bo_info_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_SUBMIT, ivpu_submit_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_BO_WAIT, ivpu_bo_wait_ioctl, 0),
};
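
/* Poll the boot IPC channel until the firmware posts its ready message or
 * vdev->timeout.boot expires. Interrupts are still disabled at this point,
 * so the IPC IRQ handler is invoked by hand on every iteration. */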
static int ivpu_wait_for_ready(struct ivpu_device *vdev)
{
	struct ivpu_ipc_consumer cons;
	struct ivpu_ipc_hdr ipc_hdr;
	unsigned long timeout;
	int ret;

	if (ivpu_test_mode & IVPU_TEST_MODE_FW_TEST)
		return 0;

	ivpu_ipc_consumer_add(vdev, &cons, IVPU_IPC_CHAN_BOOT_MSG, NULL);

	timeout = jiffies + msecs_to_jiffies(vdev->timeout.boot);
	while (1) {
		ivpu_ipc_irq_handler(vdev, NULL);
		ret = ivpu_ipc_receive(vdev, &cons, &ipc_hdr, NULL, 0);
		if (ret != -ETIMEDOUT || time_after_eq(jiffies, timeout))
			break;

		cond_resched();
	}

	ivpu_ipc_consumer_del(vdev, &cons);

	if (!ret && ipc_hdr.data_addr != IVPU_IPC_BOOT_MSG_DATA_ADDR) {
		ivpu_err(vdev, "Invalid VPU ready message: 0x%x\n",
			 ipc_hdr.data_addr);
		return -EIO;
	}

	if (!ret)
		ivpu_dbg(vdev, PM, "VPU ready message received successfully\n");

	return ret;
}

/**
 * ivpu_boot() - Start VPU firmware
 * @vdev: VPU device
 *
 * This function is paired with ivpu_shutdown() but it doesn't power up the
 * VPU because power up has to be called very early in ivpu_probe().
 */
int ivpu_boot(struct ivpu_device *vdev)
{
	int ret;

	/* Update boot params located at first 4KB of FW memory */
	ivpu_fw_boot_params_setup(vdev, ivpu_bo_vaddr(vdev->fw->mem));

	ret = ivpu_hw_boot_fw(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to start the firmware: %d\n", ret);
		return ret;
	}

	ret = ivpu_wait_for_ready(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to boot the firmware: %d\n", ret);
		ivpu_hw_diagnose_failure(vdev);
		ivpu_mmu_evtq_dump(vdev);
		ivpu_fw_log_dump(vdev);
		return ret;
	}

	ivpu_hw_irq_clear(vdev);
	enable_irq(vdev->irq);
	ivpu_hw_irq_enable(vdev);
	ivpu_ipc_enable(vdev);
	return 0;
}
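
/* Quiesce the device without powering it down: mask and disable interrupts,
 * then stop IPC and the MMU. ivpu_shutdown() additionally cuts power. */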
void ivpu_prepare_for_reset(struct ivpu_device *vdev)
{
	ivpu_hw_irq_disable(vdev);
	disable_irq(vdev->irq);
	ivpu_ipc_disable(vdev);
	ivpu_mmu_disable(vdev);
}

int ivpu_shutdown(struct ivpu_device *vdev)
{
	int ret;

	ivpu_prepare_for_reset(vdev);

	ret = ivpu_hw_power_down(vdev);
	if (ret)
		ivpu_warn(vdev, "Failed to power down HW: %d\n", ret);

	return ret;
}
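
/* The device is exposed as a compute accelerator (/dev/accel/accel<n>)
 * rather than a render node, hence DRM_ACCEL_FOPS and DRIVER_COMPUTE_ACCEL. */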
static const struct file_operations ivpu_fops = {
	.owner = THIS_MODULE,
	DRM_ACCEL_FOPS,
};

static const struct drm_driver driver = {
	.driver_features = DRIVER_GEM | DRIVER_COMPUTE_ACCEL,

	.open = ivpu_open,
	.postclose = ivpu_postclose,

	.gem_create_object = ivpu_gem_create_object,
	.gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table,

	.ioctls = ivpu_drm_ioctls,
	.num_ioctls = ARRAY_SIZE(ivpu_drm_ioctls),
	.fops = &ivpu_fops,

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRM_IVPU_DRIVER_MAJOR,
	.minor = DRM_IVPU_DRIVER_MINOR,
};

static irqreturn_t ivpu_irq_thread_handler(int irq, void *arg)
{
	struct ivpu_device *vdev = arg;

	return ivpu_ipc_irq_thread_handler(vdev);
}
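
/* The IRQ is requested with IRQF_NO_AUTOEN so it stays disabled until
 * ivpu_boot() has cleared stale interrupt state and calls enable_irq(). */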
static int ivpu_irq_init(struct ivpu_device *vdev)
{
	struct pci_dev *pdev = to_pci_dev(vdev->drm.dev);
	int ret;

	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI | PCI_IRQ_MSIX);
	if (ret < 0) {
		ivpu_err(vdev, "Failed to allocate a MSI IRQ: %d\n", ret);
		return ret;
	}

	vdev->irq = pci_irq_vector(pdev, 0);

	ret = devm_request_threaded_irq(vdev->drm.dev, vdev->irq, vdev->hw->ops->irq_handler,
					ivpu_irq_thread_handler, IRQF_NO_AUTOEN, DRIVER_NAME, vdev);
	if (ret)
		ivpu_err(vdev, "Failed to request an IRQ %d\n", ret);

	return ret;
}

static int ivpu_pci_init(struct ivpu_device *vdev)
{
	struct pci_dev *pdev = to_pci_dev(vdev->drm.dev);
	struct resource *bar0 = &pdev->resource[0];
	struct resource *bar4 = &pdev->resource[4];
	int ret;

	ivpu_dbg(vdev, MISC, "Mapping BAR0 (RegV) %pR\n", bar0);
	vdev->regv = devm_ioremap_resource(vdev->drm.dev, bar0);
	if (IS_ERR(vdev->regv)) {
		ivpu_err(vdev, "Failed to map bar 0: %pe\n", vdev->regv);
		return PTR_ERR(vdev->regv);
	}

	ivpu_dbg(vdev, MISC, "Mapping BAR4 (RegB) %pR\n", bar4);
	vdev->regb = devm_ioremap_resource(vdev->drm.dev, bar4);
	if (IS_ERR(vdev->regb)) {
		ivpu_err(vdev, "Failed to map bar 4: %pe\n", vdev->regb);
		return PTR_ERR(vdev->regb);
	}

	ret = dma_set_mask_and_coherent(vdev->drm.dev, DMA_BIT_MASK(vdev->hw->dma_bits));
	if (ret) {
		ivpu_err(vdev, "Failed to set DMA mask: %d\n", ret);
		return ret;
	}
	dma_set_max_seg_size(vdev->drm.dev, UINT_MAX);

	/* Clear any pending errors */
	pcie_capability_clear_word(pdev, PCI_EXP_DEVSTA, 0x3f);

	/* NPU does not require the 10ms D3hot delay */
	pdev->d3hot_delay = 0;

	ret = pcim_enable_device(pdev);
	if (ret) {
		ivpu_err(vdev, "Failed to enable PCI device: %d\n", ret);
		return ret;
	}

	pci_set_master(pdev);

	return 0;
}
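
/* Bring-up order: allocate sub-structures, then PCI, IRQ and HW info (readable
 * before power up), power up early so init code can touch NPU registers, then
 * MMU contexts, firmware, IPC and finally firmware boot. The error labels
 * unwind in reverse order. */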
static int ivpu_dev_init(struct ivpu_device *vdev)
{
	int ret;

	vdev->hw = drmm_kzalloc(&vdev->drm, sizeof(*vdev->hw), GFP_KERNEL);
	if (!vdev->hw)
		return -ENOMEM;

	vdev->mmu = drmm_kzalloc(&vdev->drm, sizeof(*vdev->mmu), GFP_KERNEL);
	if (!vdev->mmu)
		return -ENOMEM;

	vdev->fw = drmm_kzalloc(&vdev->drm, sizeof(*vdev->fw), GFP_KERNEL);
	if (!vdev->fw)
		return -ENOMEM;

	vdev->ipc = drmm_kzalloc(&vdev->drm, sizeof(*vdev->ipc), GFP_KERNEL);
	if (!vdev->ipc)
		return -ENOMEM;

	vdev->pm = drmm_kzalloc(&vdev->drm, sizeof(*vdev->pm), GFP_KERNEL);
	if (!vdev->pm)
		return -ENOMEM;

	if (ivpu_hw_gen(vdev) >= IVPU_HW_40XX) {
		vdev->hw->ops = &ivpu_hw_40xx_ops;
		vdev->hw->dma_bits = 48;
	} else {
		vdev->hw->ops = &ivpu_hw_37xx_ops;
		vdev->hw->dma_bits = 38;
	}

	vdev->platform = IVPU_PLATFORM_INVALID;
	vdev->context_xa_limit.min = IVPU_USER_CONTEXT_MIN_SSID;
	vdev->context_xa_limit.max = IVPU_USER_CONTEXT_MAX_SSID;
	atomic64_set(&vdev->unique_id_counter, 0);
	xa_init_flags(&vdev->context_xa, XA_FLAGS_ALLOC);
	xa_init_flags(&vdev->submitted_jobs_xa, XA_FLAGS_ALLOC1);
	lockdep_set_class(&vdev->submitted_jobs_xa.xa_lock, &submitted_jobs_xa_lock_class_key);
	INIT_LIST_HEAD(&vdev->bo_list);

	ret = drmm_mutex_init(&vdev->drm, &vdev->context_list_lock);
	if (ret)
		goto err_xa_destroy;

	ret = drmm_mutex_init(&vdev->drm, &vdev->bo_list_lock);
	if (ret)
		goto err_xa_destroy;

	ret = ivpu_pci_init(vdev);
	if (ret)
		goto err_xa_destroy;

	ret = ivpu_irq_init(vdev);
	if (ret)
		goto err_xa_destroy;

	/* Init basic HW info based on buttress registers which are accessible before power up */
	ret = ivpu_hw_info_init(vdev);
	if (ret)
		goto err_xa_destroy;

	/* Power up early so the rest of init code can access VPU registers */
	ret = ivpu_hw_power_up(vdev);
	if (ret)
		goto err_power_down;

	ret = ivpu_mmu_global_context_init(vdev);
	if (ret)
		goto err_power_down;

	ret = ivpu_mmu_init(vdev);
	if (ret)
		goto err_mmu_gctx_fini;

	ret = ivpu_mmu_reserved_context_init(vdev);
	if (ret)
		goto err_mmu_gctx_fini;

	ret = ivpu_fw_init(vdev);
	if (ret)
		goto err_mmu_rctx_fini;

	ret = ivpu_ipc_init(vdev);
	if (ret)
		goto err_fw_fini;

	ivpu_pm_init(vdev);

	ret = ivpu_boot(vdev);
	if (ret)
		goto err_ipc_fini;

	ivpu_job_done_consumer_init(vdev);
	ivpu_pm_enable(vdev);

	return 0;

err_ipc_fini:
	ivpu_ipc_fini(vdev);
err_fw_fini:
	ivpu_fw_fini(vdev);
err_mmu_rctx_fini:
	ivpu_mmu_reserved_context_fini(vdev);
err_mmu_gctx_fini:
	ivpu_mmu_global_context_fini(vdev);
err_power_down:
	ivpu_hw_power_down(vdev);
	if (IVPU_WA(d3hot_after_power_off))
		pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);
err_xa_destroy:
	xa_destroy(&vdev->submitted_jobs_xa);
	xa_destroy(&vdev->context_xa);
	return ret;
}

static void ivpu_bo_unbind_all_user_contexts(struct ivpu_device *vdev)
{
	struct ivpu_file_priv *file_priv;
	unsigned long ctx_id;

	mutex_lock(&vdev->context_list_lock);

	xa_for_each(&vdev->context_xa, ctx_id, file_priv)
		file_priv_unbind(vdev, file_priv);

	mutex_unlock(&vdev->context_list_lock);
}
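
/* Teardown: stop PM and power down the NPU first, abort outstanding jobs,
 * unbind any user contexts still around, then release IPC, firmware and
 * MMU resources. */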
static void ivpu_dev_fini(struct ivpu_device *vdev)
{
	ivpu_pm_disable(vdev);
	ivpu_shutdown(vdev);
	if (IVPU_WA(d3hot_after_power_off))
		pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);

	ivpu_jobs_abort_all(vdev);
	ivpu_job_done_consumer_fini(vdev);
	ivpu_pm_cancel_recovery(vdev);
	ivpu_bo_unbind_all_user_contexts(vdev);

	ivpu_ipc_fini(vdev);
	ivpu_fw_fini(vdev);
	ivpu_mmu_reserved_context_fini(vdev);
	ivpu_mmu_global_context_fini(vdev);

	drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->submitted_jobs_xa));
	xa_destroy(&vdev->submitted_jobs_xa);
	drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->context_xa));
	xa_destroy(&vdev->context_xa);
}

static struct pci_device_id ivpu_pci_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_MTL) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_ARL) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_LNL) },
	{ }
};
MODULE_DEVICE_TABLE(pci, ivpu_pci_ids);

static int ivpu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct ivpu_device *vdev;
	int ret;

	vdev = devm_drm_dev_alloc(&pdev->dev, &driver, struct ivpu_device, drm);
	if (IS_ERR(vdev))
		return PTR_ERR(vdev);

	pci_set_drvdata(pdev, vdev);

	ret = ivpu_dev_init(vdev);
	if (ret)
		return ret;

	ivpu_debugfs_init(vdev);

	ret = drm_dev_register(&vdev->drm, 0);
	if (ret) {
		dev_err(&pdev->dev, "Failed to register DRM device: %d\n", ret);
		ivpu_dev_fini(vdev);
	}

	return ret;
}
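
/* drm_dev_unplug() makes later drm_dev_enter() calls fail, so in-flight and
 * future ioctls see -ENODEV while the device is being torn down. */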
static void ivpu_remove(struct pci_dev *pdev)
{
	struct ivpu_device *vdev = pci_get_drvdata(pdev);

	drm_dev_unplug(&vdev->drm);
	ivpu_dev_fini(vdev);
}

static const struct dev_pm_ops ivpu_drv_pci_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(ivpu_pm_suspend_cb, ivpu_pm_resume_cb)
	SET_RUNTIME_PM_OPS(ivpu_pm_runtime_suspend_cb, ivpu_pm_runtime_resume_cb, NULL)
};

static const struct pci_error_handlers ivpu_drv_pci_err = {
	.reset_prepare = ivpu_pm_reset_prepare_cb,
	.reset_done = ivpu_pm_reset_done_cb,
};

static struct pci_driver ivpu_pci_driver = {
	.name = KBUILD_MODNAME,
	.id_table = ivpu_pci_ids,
	.probe = ivpu_probe,
	.remove = ivpu_remove,
	.driver = {
		.pm = &ivpu_drv_pci_pm,
	},
	.err_handler = &ivpu_drv_pci_err,
};

module_pci_driver(ivpu_pci_driver);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");
MODULE_VERSION(DRIVER_VERSION_STR);