/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
/* Copyright(c) 2014 - 2020 Intel Corporation */
#ifndef ADF_ACCEL_DEVICES_H_
#define ADF_ACCEL_DEVICES_H_
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/io.h>
#include <linux/ratelimit.h>
#include "adf_cfg_common.h"

#define ADF_DH895XCC_DEVICE_NAME "dh895xcc"
#define ADF_DH895XCCVF_DEVICE_NAME "dh895xccvf"
#define ADF_C62X_DEVICE_NAME "c6xx"
#define ADF_C62XVF_DEVICE_NAME "c6xxvf"
#define ADF_C3XXX_DEVICE_NAME "c3xxx"
#define ADF_C3XXXVF_DEVICE_NAME "c3xxxvf"
#define ADF_4XXX_DEVICE_NAME "4xxx"
#define ADF_4XXX_PCI_DEVICE_ID 0x4940
#define ADF_4XXXIOV_PCI_DEVICE_ID 0x4941
#define ADF_DEVICE_FUSECTL_OFFSET 0x40
#define ADF_DEVICE_LEGFUSE_OFFSET 0x4C
#define ADF_DEVICE_FUSECTL_MASK 0x80000000
#define ADF_PCI_MAX_BARS 3
#define ADF_DEVICE_NAME_LENGTH 32
#define ADF_ETR_MAX_RINGS_PER_BANK 16
#define ADF_MAX_MSIX_VECTOR_NAME 16
#define ADF_DEVICE_NAME_PREFIX "qat_"

enum adf_accel_capabilities {
	ADF_ACCEL_CAPABILITIES_NULL = 0,
	ADF_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC = 1,
	ADF_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC = 2,
	ADF_ACCEL_CAPABILITIES_CIPHER = 4,
	ADF_ACCEL_CAPABILITIES_AUTHENTICATION = 8,
	ADF_ACCEL_CAPABILITIES_COMPRESSION = 32,
	ADF_ACCEL_CAPABILITIES_LZS_COMPRESSION = 64,
	ADF_ACCEL_CAPABILITIES_RANDOM_NUMBER = 128
};
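
/*
 * The non-NULL capability values above are powers of two, used as bit flags
 * in a per-device capability mask.  Illustrative check (a sketch, assuming
 * hw_data points at a populated struct adf_hw_device_data, defined below;
 * setup_compression() is a hypothetical helper, not part of this API):
 *
 *	if (hw_data->accel_capabilities_mask &
 *	    ADF_ACCEL_CAPABILITIES_COMPRESSION)
 *		setup_compression(accel_dev);
 */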

struct adf_bar {
	resource_size_t base_addr;
	void __iomem *virt_addr;
	resource_size_t size;
};

struct adf_irq {
	bool enabled;
	char name[ADF_MAX_MSIX_VECTOR_NAME];
};

struct adf_accel_msix {
	struct adf_irq *irqs;
	u32 num_entries;
};

struct adf_accel_pci {
	struct pci_dev *pci_dev;
	struct adf_accel_msix msix_entries;
	struct adf_bar pci_bars[ADF_PCI_MAX_BARS];
	u8 revid;
	u8 sku;
};

enum dev_state {
	DEV_DOWN = 0,
	DEV_UP
};

enum dev_sku_info {
	DEV_SKU_1 = 0,
	DEV_SKU_2,
	DEV_SKU_3,
	DEV_SKU_4,
	DEV_SKU_VF,
	DEV_SKU_UNKNOWN,
};

static inline const char *get_sku_info(enum dev_sku_info info)
{
	switch (info) {
	case DEV_SKU_1:
		return "SKU1";
	case DEV_SKU_2:
		return "SKU2";
	case DEV_SKU_3:
		return "SKU3";
	case DEV_SKU_4:
		return "SKU4";
	case DEV_SKU_VF:
		return "SKUVF";
	case DEV_SKU_UNKNOWN:
	default:
		break;
	}
	return "Unknown SKU";
}

struct adf_hw_device_class {
	const char *name;
	const enum adf_device_type type;
	u32 instances;
};

struct arb_info {
	u32 arb_cfg;
	u32 arb_offset;
	u32 wt2sam_offset;
};

struct admin_info {
	u32 admin_msg_ur;
	u32 admin_msg_lr;
	u32 mailbox_offset;
};

struct adf_hw_csr_ops {
	u64 (*build_csr_ring_base_addr)(dma_addr_t addr, u32 size);
	u32 (*read_csr_ring_head)(void __iomem *csr_base_addr, u32 bank,
				  u32 ring);
	void (*write_csr_ring_head)(void __iomem *csr_base_addr, u32 bank,
				    u32 ring, u32 value);
	u32 (*read_csr_ring_tail)(void __iomem *csr_base_addr, u32 bank,
				  u32 ring);
	void (*write_csr_ring_tail)(void __iomem *csr_base_addr, u32 bank,
				    u32 ring, u32 value);
	u32 (*read_csr_e_stat)(void __iomem *csr_base_addr, u32 bank);
	void (*write_csr_ring_config)(void __iomem *csr_base_addr, u32 bank,
				      u32 ring, u32 value);
	void (*write_csr_ring_base)(void __iomem *csr_base_addr, u32 bank,
				    u32 ring, dma_addr_t addr);
	void (*write_csr_int_flag)(void __iomem *csr_base_addr, u32 bank,
				   u32 value);
	void (*write_csr_int_srcsel)(void __iomem *csr_base_addr, u32 bank);
	void (*write_csr_int_col_en)(void __iomem *csr_base_addr, u32 bank,
				     u32 value);
	void (*write_csr_int_col_ctl)(void __iomem *csr_base_addr, u32 bank,
				      u32 value);
	void (*write_csr_int_flag_and_col)(void __iomem *csr_base_addr,
					   u32 bank, u32 value);
	void (*write_csr_ring_srv_arb_en)(void __iomem *csr_base_addr,
					  u32 bank, u32 value);
};
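
/*
 * Ring CSR layouts differ between QAT hardware generations, so ring register
 * accesses go through this ops table rather than fixed offsets.  Illustrative
 * call sequence (a sketch; GET_CSR_OPS is defined later in this file, and
 * csr_base/bank_nr/ring_nr are assumed inputs):
 *
 *	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
 *	u32 head = csr_ops->read_csr_ring_head(csr_base, bank_nr, ring_nr);
 *	csr_ops->write_csr_ring_head(csr_base, bank_nr, ring_nr, head);
 */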

struct adf_cfg_device_data;
struct adf_accel_dev;
struct adf_etr_data;
struct adf_etr_ring_data;

struct adf_pfvf_ops {
	int (*enable_comms)(struct adf_accel_dev *accel_dev);
	u32 (*get_pf2vf_offset)(u32 i);
	u32 (*get_vf2pf_offset)(u32 i);
	u32 (*get_vf2pf_sources)(void __iomem *pmisc_addr);
	void (*enable_vf2pf_interrupts)(void __iomem *pmisc_addr, u32 vf_mask);
	void (*disable_vf2pf_interrupts)(void __iomem *pmisc_addr, u32 vf_mask);
	int (*send_msg)(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr);
	u32 (*recv_msg)(struct adf_accel_dev *accel_dev, u8 vf_nr);
};
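
/*
 * PF<->VF messaging is likewise device specific and abstracted behind an ops
 * table.  Illustrative send (a sketch; GET_PFVF_OPS is defined later in this
 * file, and msg/vf_nr are assumed inputs):
 *
 *	int ret = GET_PFVF_OPS(accel_dev)->send_msg(accel_dev, msg, vf_nr);
 */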

struct adf_hw_device_data {
	struct adf_hw_device_class *dev_class;
	u32 (*get_accel_mask)(struct adf_hw_device_data *self);
	u32 (*get_ae_mask)(struct adf_hw_device_data *self);
	u32 (*get_accel_cap)(struct adf_accel_dev *accel_dev);
	u32 (*get_sram_bar_id)(struct adf_hw_device_data *self);
	u32 (*get_misc_bar_id)(struct adf_hw_device_data *self);
	u32 (*get_etr_bar_id)(struct adf_hw_device_data *self);
	u32 (*get_num_aes)(struct adf_hw_device_data *self);
	u32 (*get_num_accels)(struct adf_hw_device_data *self);
	void (*get_arb_info)(struct arb_info *arb_csrs_info);
	void (*get_admin_info)(struct admin_info *admin_csrs_info);
	enum dev_sku_info (*get_sku)(struct adf_hw_device_data *self);
	int (*alloc_irq)(struct adf_accel_dev *accel_dev);
	void (*free_irq)(struct adf_accel_dev *accel_dev);
	void (*enable_error_correction)(struct adf_accel_dev *accel_dev);
	int (*init_admin_comms)(struct adf_accel_dev *accel_dev);
	void (*exit_admin_comms)(struct adf_accel_dev *accel_dev);
	int (*send_admin_init)(struct adf_accel_dev *accel_dev);
	int (*init_arb)(struct adf_accel_dev *accel_dev);
	void (*exit_arb)(struct adf_accel_dev *accel_dev);
	const u32 *(*get_arb_mapping)(void);
	int (*init_device)(struct adf_accel_dev *accel_dev);
	void (*disable_iov)(struct adf_accel_dev *accel_dev);
	void (*configure_iov_threads)(struct adf_accel_dev *accel_dev,
				      bool enable);
	void (*enable_ints)(struct adf_accel_dev *accel_dev);
	void (*set_ssm_wdtimer)(struct adf_accel_dev *accel_dev);
	void (*reset_device)(struct adf_accel_dev *accel_dev);
	void (*set_msix_rttable)(struct adf_accel_dev *accel_dev);
	char *(*uof_get_name)(u32 obj_num);
	u32 (*uof_get_num_objs)(void);
	u32 (*uof_get_ae_mask)(u32 obj_num);
	struct adf_pfvf_ops pfvf_ops;
	struct adf_hw_csr_ops csr_ops;
	const char *fw_name;
	const char *fw_mmp_name;
	u32 fuses;
	u32 straps;
	u32 accel_capabilities_mask;
	u32 instance_id;
	u16 accel_mask;
	u32 ae_mask;
	u32 admin_ae_mask;
	u16 tx_rings_mask;
	u8 tx_rx_gap;
	u8 num_banks;
	u8 num_rings_per_bank;
	u8 num_accel;
	u8 num_logical_accel;
	u8 num_engines;
};
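
/*
 * Each device-specific module fills in a struct adf_hw_device_data with its
 * callbacks and topology constants at probe time.  A minimal, purely
 * illustrative sketch (the function name and values here are hypothetical,
 * not the real per-device initializers):
 *
 *	void adf_init_hw_data_example(struct adf_hw_device_data *hw_data)
 *	{
 *		hw_data->num_banks = 16;
 *		hw_data->num_rings_per_bank = ADF_ETR_MAX_RINGS_PER_BANK;
 *		hw_data->get_sku = example_get_sku;
 *		hw_data->fw_name = "example_fw.bin";
 *	}
 */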

/* CSR write macro */
#define ADF_CSR_WR(csr_base, csr_offset, val) \
	__raw_writel(val, csr_base + csr_offset)

/* CSR read macro */
#define ADF_CSR_RD(csr_base, csr_offset) __raw_readl(csr_base + csr_offset)

#define GET_DEV(accel_dev) ((accel_dev)->accel_pci_dev.pci_dev->dev)
#define GET_BARS(accel_dev) ((accel_dev)->accel_pci_dev.pci_bars)
#define GET_HW_DATA(accel_dev) (accel_dev->hw_device)
#define GET_MAX_BANKS(accel_dev) (GET_HW_DATA(accel_dev)->num_banks)
#define GET_NUM_RINGS_PER_BANK(accel_dev) \
	GET_HW_DATA(accel_dev)->num_rings_per_bank
#define GET_MAX_ACCELENGINES(accel_dev) (GET_HW_DATA(accel_dev)->num_engines)
#define GET_CSR_OPS(accel_dev) (&(accel_dev)->hw_device->csr_ops)
#define GET_PFVF_OPS(accel_dev) (&(accel_dev)->hw_device->pfvf_ops)
#define accel_to_pci_dev(accel_ptr) accel_ptr->accel_pci_dev.pci_dev
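
/*
 * Example use of the accessors above (a sketch; 'offset' is an assumed
 * register offset, not defined in this file):
 *
 *	struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
 *	struct adf_bar *bar =
 *		&GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
 *	u32 val = ADF_CSR_RD(bar->virt_addr, offset);
 *	ADF_CSR_WR(bar->virt_addr, offset, val | BIT(0));
 */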

struct adf_admin_comms;
struct icp_qat_fw_loader_handle;
struct adf_fw_loader_data {
	struct icp_qat_fw_loader_handle *fw_loader;
	const struct firmware *uof_fw;
	const struct firmware *mmp_fw;
};

struct adf_accel_vf_info {
	struct adf_accel_dev *accel_dev;
	struct mutex pf2vf_lock; /* protect CSR access for PF2VF messages */
	struct ratelimit_state vf2pf_ratelimit;
	u32 vf_nr;
	bool init;
};

struct adf_accel_dev {
	struct adf_etr_data *transport;
	struct adf_hw_device_data *hw_device;
	struct adf_cfg_device_data *cfg;
	struct adf_fw_loader_data *fw_loader;
	struct adf_admin_comms *admin;
	struct list_head crypto_list;
	unsigned long status;
	atomic_t ref_count;
	struct dentry *debugfs_dir;
	struct list_head list;
	struct module *owner;
	struct adf_accel_pci accel_pci_dev;
	union {
		struct {
			/* protects VF2PF interrupts access */
			spinlock_t vf2pf_ints_lock;
			/* vf_info is non-zero when SR-IOV is init'ed */
			struct adf_accel_vf_info *vf_info;
		} pf;
		struct {
			bool irq_enabled;
			char irq_name[ADF_MAX_MSIX_VECTOR_NAME];
			struct tasklet_struct pf2vf_bh_tasklet;
			struct mutex vf2pf_lock; /* protect CSR access */
			struct completion msg_received;
			u32 response; /* temp field holding pf2vf response */
			u8 pf_version;
		} vf;
	};
	bool is_vf;
	u32 accel_id;
};
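
/*
 * The anonymous pf/vf union above is discriminated by is_vf: the PF driver
 * uses accel_dev->pf and the VF driver uses accel_dev->vf.  Illustrative
 * check (a sketch):
 *
 *	if (accel_dev->is_vf)
 *		complete(&accel_dev->vf.msg_received);
 */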

#endif