linux/drivers/crypto/qat/qat_common/adf_accel_devices.h

/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
/* Copyright(c) 2014 - 2020 Intel Corporation */
#ifndef ADF_ACCEL_DEVICES_H_
#define ADF_ACCEL_DEVICES_H_
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/io.h>
#include <linux/ratelimit.h>
#include "adf_cfg_common.h"
#include "adf_pfvf_msg.h"
#define ADF_DH895XCC_DEVICE_NAME "dh895xcc"
#define ADF_DH895XCCVF_DEVICE_NAME "dh895xccvf"
#define ADF_C62X_DEVICE_NAME "c6xx"
#define ADF_C62XVF_DEVICE_NAME "c6xxvf"
#define ADF_C3XXX_DEVICE_NAME "c3xxx"
#define ADF_C3XXXVF_DEVICE_NAME "c3xxxvf"
#define ADF_4XXX_DEVICE_NAME "4xxx"
#define ADF_4XXX_PCI_DEVICE_ID 0x4940
#define ADF_4XXXIOV_PCI_DEVICE_ID 0x4941
#define ADF_DEVICE_FUSECTL_OFFSET 0x40
#define ADF_DEVICE_LEGFUSE_OFFSET 0x4C
#define ADF_DEVICE_FUSECTL_MASK 0x80000000
#define ADF_PCI_MAX_BARS 3
#define ADF_DEVICE_NAME_LENGTH 32
#define ADF_ETR_MAX_RINGS_PER_BANK 16
#define ADF_MAX_MSIX_VECTOR_NAME 16
#define ADF_DEVICE_NAME_PREFIX "qat_"
enum adf_accel_capabilities {
	ADF_ACCEL_CAPABILITIES_NULL = 0,
	ADF_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC = 1,
	ADF_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC = 2,
	ADF_ACCEL_CAPABILITIES_CIPHER = 4,
	ADF_ACCEL_CAPABILITIES_AUTHENTICATION = 8,
	ADF_ACCEL_CAPABILITIES_COMPRESSION = 32,
	ADF_ACCEL_CAPABILITIES_LZS_COMPRESSION = 64,
	ADF_ACCEL_CAPABILITIES_RANDOM_NUMBER = 128
};
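
/*
 * Illustrative sketch (not part of the original header): these capability
 * flags are single bits, so a driver holding an accel_dev would typically
 * test them against the accel_capabilities_mask field of adf_hw_device_data
 * using the GET_HW_DATA()/GET_DEV() helpers defined later in this file:
 *
 *	struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
 *
 *	if (hw_data->accel_capabilities_mask &
 *	    ADF_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC)
 *		dev_info(&GET_DEV(accel_dev), "symmetric crypto supported\n");
 */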
struct adf_bar {
	resource_size_t base_addr;
	void __iomem *virt_addr;
	resource_size_t size;
};
struct adf_irq {
	bool enabled;
	char name[ADF_MAX_MSIX_VECTOR_NAME];
};
struct adf_accel_msix {
	struct adf_irq *irqs;
	u32 num_entries;
};
struct adf_accel_pci {
	struct pci_dev *pci_dev;
	struct adf_accel_msix msix_entries;
	struct adf_bar pci_bars[ADF_PCI_MAX_BARS];
	u8 revid;
	u8 sku;
};
enum dev_state {
	DEV_DOWN = 0,
	DEV_UP
};
enum dev_sku_info {
	DEV_SKU_1 = 0,
	DEV_SKU_2,
	DEV_SKU_3,
	DEV_SKU_4,
	DEV_SKU_VF,
	DEV_SKU_UNKNOWN,
};
static inline const char *get_sku_info(enum dev_sku_info info)
{
	switch (info) {
	case DEV_SKU_1:
		return "SKU1";
	case DEV_SKU_2:
		return "SKU2";
	case DEV_SKU_3:
		return "SKU3";
	case DEV_SKU_4:
		return "SKU4";
	case DEV_SKU_VF:
		return "SKUVF";
	case DEV_SKU_UNKNOWN:
	default:
		break;
	}
	return "Unknown SKU";
}
struct adf_hw_device_class {
	const char *name;
	const enum adf_device_type type;
	u32 instances;
};
struct arb_info {
	u32 arb_cfg;
	u32 arb_offset;
	u32 wt2sam_offset;
};
struct admin_info {
	u32 admin_msg_ur;
	u32 admin_msg_lr;
	u32 mailbox_offset;
};
struct adf_hw_csr_ops {
	u64 (*build_csr_ring_base_addr)(dma_addr_t addr, u32 size);
	u32 (*read_csr_ring_head)(void __iomem *csr_base_addr, u32 bank,
		u32 ring);
	void (*write_csr_ring_head)(void __iomem *csr_base_addr, u32 bank,
		u32 ring, u32 value);
	u32 (*read_csr_ring_tail)(void __iomem *csr_base_addr, u32 bank,
		u32 ring);
	void (*write_csr_ring_tail)(void __iomem *csr_base_addr, u32 bank,
		u32 ring, u32 value);
	u32 (*read_csr_e_stat)(void __iomem *csr_base_addr, u32 bank);
	void (*write_csr_ring_config)(void __iomem *csr_base_addr, u32 bank,
		u32 ring, u32 value);
	void (*write_csr_ring_base)(void __iomem *csr_base_addr, u32 bank,
		u32 ring, dma_addr_t addr);
	void (*write_csr_int_flag)(void __iomem *csr_base_addr, u32 bank,
		u32 value);
	void (*write_csr_int_srcsel)(void __iomem *csr_base_addr, u32 bank);
	void (*write_csr_int_col_en)(void __iomem *csr_base_addr, u32 bank,
		u32 value);
	void (*write_csr_int_col_ctl)(void __iomem *csr_base_addr, u32 bank,
		u32 value);
	void (*write_csr_int_flag_and_col)(void __iomem *csr_base_addr,
		u32 bank, u32 value);
	void (*write_csr_ring_srv_arb_en)(void __iomem *csr_base_addr, u32 bank,
		u32 value);
};
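
/*
 * Illustrative sketch (not part of the original header): transport code
 * reaches these per-generation CSR accessors through the csr_ops table,
 * e.g. to advance a ring tail after submitting a request. The csr, bank,
 * ring and tail values below are placeholders:
 *
 *	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
 *	void __iomem *csr = <the bank's MMIO base, from the transport layer>;
 *
 *	csr_ops->write_csr_ring_tail(csr, bank, ring, tail);
 *	head = csr_ops->read_csr_ring_head(csr, bank, ring);
 */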
struct adf_cfg_device_data;
struct adf_accel_dev;
struct adf_etr_data;
struct adf_etr_ring_data;
struct adf_pfvf_ops {
	int (*enable_comms)(struct adf_accel_dev *accel_dev);
	u32 (*get_pf2vf_offset)(u32 i);
	u32 (*get_vf2pf_offset)(u32 i);
	void (*enable_vf2pf_interrupts)(void __iomem *pmisc_addr, u32 vf_mask);
	void (*disable_vf2pf_interrupts)(void __iomem *pmisc_addr, u32 vf_mask);
	u32 (*disable_pending_vf2pf_interrupts)(void __iomem *pmisc_addr);
	int (*send_msg)(struct adf_accel_dev *accel_dev, struct pfvf_message msg,
		u32 pfvf_offset, struct mutex *csr_lock);
	struct pfvf_message (*recv_msg)(struct adf_accel_dev *accel_dev,
		u32 pfvf_offset, u8 compat_ver);
};
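
/*
 * Illustrative sketch (not part of the original header): PFVF transport code
 * combines these callbacks to exchange messages between PF and VF; msg,
 * vf_nr and csr_lock below are placeholders, and struct pfvf_message itself
 * comes from adf_pfvf_msg.h:
 *
 *	struct adf_pfvf_ops *pfvf_ops = GET_PFVF_OPS(accel_dev);
 *	u32 offset = pfvf_ops->get_pf2vf_offset(vf_nr);
 *	int ret = pfvf_ops->send_msg(accel_dev, msg, offset, csr_lock);
 */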
struct adf_hw_device_data {
	struct adf_hw_device_class *dev_class;
	u32 (*get_accel_mask)(struct adf_hw_device_data *self);
	u32 (*get_ae_mask)(struct adf_hw_device_data *self);
	u32 (*get_accel_cap)(struct adf_accel_dev *accel_dev);
	u32 (*get_sram_bar_id)(struct adf_hw_device_data *self);
	u32 (*get_misc_bar_id)(struct adf_hw_device_data *self);
	u32 (*get_etr_bar_id)(struct adf_hw_device_data *self);
	u32 (*get_num_aes)(struct adf_hw_device_data *self);
	u32 (*get_num_accels)(struct adf_hw_device_data *self);
	void (*get_arb_info)(struct arb_info *arb_csrs_info);
	void (*get_admin_info)(struct admin_info *admin_csrs_info);
	enum dev_sku_info (*get_sku)(struct adf_hw_device_data *self);
	int (*alloc_irq)(struct adf_accel_dev *accel_dev);
	void (*free_irq)(struct adf_accel_dev *accel_dev);
	void (*enable_error_correction)(struct adf_accel_dev *accel_dev);
	int (*init_admin_comms)(struct adf_accel_dev *accel_dev);
	void (*exit_admin_comms)(struct adf_accel_dev *accel_dev);
	int (*send_admin_init)(struct adf_accel_dev *accel_dev);
	int (*init_arb)(struct adf_accel_dev *accel_dev);
	void (*exit_arb)(struct adf_accel_dev *accel_dev);
	const u32 *(*get_arb_mapping)(void);
	int (*init_device)(struct adf_accel_dev *accel_dev);
	int (*enable_pm)(struct adf_accel_dev *accel_dev);
	bool (*handle_pm_interrupt)(struct adf_accel_dev *accel_dev);
	void (*disable_iov)(struct adf_accel_dev *accel_dev);
	void (*configure_iov_threads)(struct adf_accel_dev *accel_dev,
		bool enable);
	void (*enable_ints)(struct adf_accel_dev *accel_dev);
	void (*set_ssm_wdtimer)(struct adf_accel_dev *accel_dev);
	int (*ring_pair_reset)(struct adf_accel_dev *accel_dev, u32 bank_nr);
	void (*reset_device)(struct adf_accel_dev *accel_dev);
	void (*set_msix_rttable)(struct adf_accel_dev *accel_dev);
	char *(*uof_get_name)(struct adf_accel_dev *accel_dev, u32 obj_num);
	u32 (*uof_get_num_objs)(void);
	u32 (*uof_get_ae_mask)(struct adf_accel_dev *accel_dev, u32 obj_num);
	struct adf_pfvf_ops pfvf_ops;
	struct adf_hw_csr_ops csr_ops;
	const char *fw_name;
	const char *fw_mmp_name;
	u32 fuses;
	u32 straps;
	u32 accel_capabilities_mask;
	u32 extended_dc_capabilities;
	u32 clock_frequency;
	u32 instance_id;
	u16 accel_mask;
	u32 ae_mask;
	u32 admin_ae_mask;
	u16 tx_rings_mask;
	u16 ring_to_svc_map;
	u8 tx_rx_gap;
	u8 num_banks;
	u16 num_banks_per_vf;
	u8 num_rings_per_bank;
	u8 num_accel;
	u8 num_logical_accel;
	u8 num_engines;
};
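
/*
 * Illustrative sketch (not part of the original header): each device
 * generation fills in adf_hw_device_data with its own constants and
 * callbacks from a per-device init routine. All names below are
 * hypothetical placeholders, not actual driver symbols:
 *
 *	void example_init_hw_data(struct adf_hw_device_data *hw_data)
 *	{
 *		hw_data->dev_class = &example_class;
 *		hw_data->num_banks = 16;
 *		hw_data->num_rings_per_bank = ADF_ETR_MAX_RINGS_PER_BANK;
 *		hw_data->get_num_aes = example_get_num_aes;
 *		hw_data->alloc_irq = example_alloc_irq;
 *	}
 */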
/* CSR write macro */
#define ADF_CSR_WR(csr_base, csr_offset, val) \
	__raw_writel(val, csr_base + csr_offset)
/* CSR read macro */
#define ADF_CSR_RD(csr_base, csr_offset) __raw_readl(csr_base + csr_offset)
#define ADF_CFG_NUM_SERVICES 4
#define ADF_SRV_TYPE_BIT_LEN 3
#define ADF_SRV_TYPE_MASK 0x7
#define GET_DEV(accel_dev) ((accel_dev)->accel_pci_dev.pci_dev->dev)
#define GET_BARS(accel_dev) ((accel_dev)->accel_pci_dev.pci_bars)
#define GET_HW_DATA(accel_dev) (accel_dev->hw_device)
#define GET_MAX_BANKS(accel_dev) (GET_HW_DATA(accel_dev)->num_banks)
#define GET_NUM_RINGS_PER_BANK(accel_dev) \
	GET_HW_DATA(accel_dev)->num_rings_per_bank
#define GET_SRV_TYPE(accel_dev, idx) \
	(((GET_HW_DATA(accel_dev)->ring_to_svc_map) >> (ADF_SRV_TYPE_BIT_LEN * (idx))) \
	 & ADF_SRV_TYPE_MASK)
#define GET_MAX_ACCELENGINES(accel_dev) (GET_HW_DATA(accel_dev)->num_engines)
#define GET_CSR_OPS(accel_dev) (&(accel_dev)->hw_device->csr_ops)
#define GET_PFVF_OPS(accel_dev) (&(accel_dev)->hw_device->pfvf_ops)
#define accel_to_pci_dev(accel_ptr) accel_ptr->accel_pci_dev.pci_dev
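
/*
 * Illustrative sketch (not part of the original header): the accessor macros
 * above are the usual way common code gets at the struct device and the
 * per-device geometry, e.g.:
 *
 *	struct device *dev = &GET_DEV(accel_dev);
 *	u32 num_banks = GET_MAX_BANKS(accel_dev);
 *	u32 srv0 = GET_SRV_TYPE(accel_dev, 0);
 *
 *	dev_dbg(dev, "%u banks, first ring-pair service type %u\n",
 *		num_banks, srv0);
 */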
struct adf_admin_comms;
struct icp_qat_fw_loader_handle;
struct adf_fw_loader_data {
	struct icp_qat_fw_loader_handle *fw_loader;
	const struct firmware *uof_fw;
	const struct firmware *mmp_fw;
};
struct adf_accel_vf_info {
	struct adf_accel_dev *accel_dev;
	struct mutex pf2vf_lock; /* protect CSR access for PF2VF messages */
	struct ratelimit_state vf2pf_ratelimit;
	u32 vf_nr;
	bool init;
	u8 vf_compat_ver;
};
struct adf_accel_dev {
	struct adf_etr_data *transport;
	struct adf_hw_device_data *hw_device;
	struct adf_cfg_device_data *cfg;
	struct adf_fw_loader_data *fw_loader;
	struct adf_admin_comms *admin;
	struct list_head crypto_list;
	unsigned long status;
	atomic_t ref_count;
	struct dentry *debugfs_dir;
	struct list_head list;
	struct module *owner;
	struct adf_accel_pci accel_pci_dev;
	union {
		struct {
			/* protects VF2PF interrupts access */
			spinlock_t vf2pf_ints_lock;
			/* vf_info is non-zero when SR-IOV is init'ed */
			struct adf_accel_vf_info *vf_info;
		} pf;
		struct {
			bool irq_enabled;
			char irq_name[ADF_MAX_MSIX_VECTOR_NAME];
			struct tasklet_struct pf2vf_bh_tasklet;
			struct mutex vf2pf_lock; /* protect CSR access */
			struct completion msg_received;
			struct pfvf_message response; /* temp field holding pf2vf response */
			u8 pf_compat_ver;
		} vf;
	};
	bool is_vf;
	u32 accel_id;
};
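
/*
 * Illustrative sketch (not part of the original header): the pf/vf union
 * above is selected by the is_vf flag, so shared code normally checks it
 * before touching either member:
 *
 *	if (!accel_dev->is_vf && accel_dev->pf.vf_info)
 *		dev_info(&GET_DEV(accel_dev), "SR-IOV enabled\n");
 *	else if (accel_dev->is_vf)
 *		dev_dbg(&GET_DEV(accel_dev), "PF compat version %u\n",
 *			accel_dev->vf.pf_compat_ver);
 */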
#endif