crypto: qat - Enforce valid numa configuration

In a system with a NUMA configuration we want to enforce that the accelerator
is connected to a node with memory, to avoid cross-QPI memory transactions.
Otherwise there is no point in using the accelerator, as encryption in
software will be faster.

Cc: stable@vger.kernel.org
Signed-off-by: Tadeusz Struk <tadeusz.struk@intel.com>
Tested-by: Nikolay Aleksandrov <nikolay@redhat.com>
Reviewed-by: Prarit Bhargava <prarit@redhat.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
commit 09adc8789c
parent 923a6e5e5f
Author: Tadeusz Struk, 2014-10-13 18:24:32 -07:00 (committed by Herbert Xu)
7 files changed, 30 insertions(+), 34 deletions(-)
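
The change is mechanical throughout: every allocation that keyed off the
driver-private accel_dev->numa_node now derives the node from the generic
struct device, and probe rejects a device whose node has no memory. As a
rough standalone sketch of the pattern (illustration only, not part of the
patch; example_probe and the buffer size are invented):

	#include <linux/nodemask.h>
	#include <linux/pci.h>
	#include <linux/slab.h>

	static int example_probe(struct pci_dev *pdev)
	{
		void *buf;

		/* On a multi-node system, refuse a device with no usable
		 * node: dev_to_node() returns NUMA_NO_NODE (< 0) when
		 * firmware reported none, so every buffer would end up on
		 * a remote node. */
		if (num_possible_nodes() > 1 && dev_to_node(&pdev->dev) < 0)
			return -EINVAL;

		/* Otherwise allocate on the device's own node. */
		buf = kzalloc_node(64, GFP_KERNEL, dev_to_node(&pdev->dev));
		if (!buf)
			return -ENOMEM;

		kfree(buf);
		return 0;
	}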

--- a/drivers/crypto/qat/qat_common/adf_accel_devices.h
+++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h

@@ -198,8 +198,7 @@ struct adf_accel_dev {
 	struct dentry *debugfs_dir;
 	struct list_head list;
 	struct module *owner;
-	uint8_t accel_id;
-	uint8_t numa_node;
 	struct adf_accel_pci accel_pci_dev;
+	uint8_t accel_id;
 } __packed;
 #endif
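
Dropping the driver's own numa_node field works because the struct device
embedded in the PCI device already records the node, filled in by the PCI
core from firmware (on x86, ACPI _PXM). For reference, dev_to_node() is
essentially (simplified from include/linux/device.h):

	static inline int dev_to_node(struct device *dev)
	{
	#ifdef CONFIG_NUMA
		return dev->numa_node;	/* set by the bus code at enumeration */
	#else
		return NUMA_NO_NODE;	/* -1: no affinity information */
	#endif
	}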

--- a/drivers/crypto/qat/qat_common/adf_transport.c
+++ b/drivers/crypto/qat/qat_common/adf_transport.c

@@ -419,9 +419,10 @@ static int adf_init_bank(struct adf_accel_dev *accel_dev,
 		WRITE_CSR_RING_BASE(csr_addr, bank_num, i, 0);
 		ring = &bank->rings[i];
 		if (hw_data->tx_rings_mask & (1 << i)) {
-			ring->inflights = kzalloc_node(sizeof(atomic_t),
-						       GFP_KERNEL,
-						       accel_dev->numa_node);
+			ring->inflights =
+				kzalloc_node(sizeof(atomic_t),
+					     GFP_KERNEL,
+					     dev_to_node(&GET_DEV(accel_dev)));
 			if (!ring->inflights)
 				goto err;
 		} else {
@@ -469,13 +470,14 @@ int adf_init_etr_data(struct adf_accel_dev *accel_dev)
 	int i, ret;
 
 	etr_data = kzalloc_node(sizeof(*etr_data), GFP_KERNEL,
-				accel_dev->numa_node);
+				dev_to_node(&GET_DEV(accel_dev)));
 	if (!etr_data)
 		return -ENOMEM;
 
 	num_banks = GET_MAX_BANKS(accel_dev);
 	size = num_banks * sizeof(struct adf_etr_bank_data);
-	etr_data->banks = kzalloc_node(size, GFP_KERNEL, accel_dev->numa_node);
+	etr_data->banks = kzalloc_node(size, GFP_KERNEL,
+				       dev_to_node(&GET_DEV(accel_dev)));
 	if (!etr_data->banks) {
 		ret = -ENOMEM;
 		goto err_bank;

--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c

@@ -596,7 +596,8 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
 
 	if (unlikely(!n))
 		return -EINVAL;
-	bufl = kmalloc_node(sz, GFP_ATOMIC, inst->accel_dev->numa_node);
+	bufl = kmalloc_node(sz, GFP_ATOMIC,
+			    dev_to_node(&GET_DEV(inst->accel_dev)));
 	if (unlikely(!bufl))
 		return -ENOMEM;
 
@@ -642,7 +643,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
 		struct qat_alg_buf *bufers;
 
 		buflout = kmalloc_node(sz, GFP_ATOMIC,
-				       inst->accel_dev->numa_node);
+				       dev_to_node(&GET_DEV(inst->accel_dev)));
 		if (unlikely(!buflout))
 			goto err;
 		bloutp = dma_map_single(dev, buflout, sz, DMA_TO_DEVICE);

--- a/drivers/crypto/qat/qat_common/qat_crypto.c
+++ b/drivers/crypto/qat/qat_common/qat_crypto.c

@@ -109,12 +109,14 @@ struct qat_crypto_instance *qat_crypto_get_instance_node(int node)
 	list_for_each(itr, adf_devmgr_get_head()) {
 		accel_dev = list_entry(itr, struct adf_accel_dev, list);
 
-		if (accel_dev->numa_node == node && adf_dev_started(accel_dev))
+		if ((node == dev_to_node(&GET_DEV(accel_dev)) ||
+		     dev_to_node(&GET_DEV(accel_dev)) < 0)
+		    && adf_dev_started(accel_dev))
 			break;
 		accel_dev = NULL;
 	}
 	if (!accel_dev) {
-		pr_err("QAT: Could not find device on give node\n");
+		pr_err("QAT: Could not find device on node %d\n", node);
 		accel_dev = adf_devmgr_get_first();
 	}
 	if (!accel_dev || !adf_dev_started(accel_dev))
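
Note the new fallback chain: a device whose node is unknown
(dev_to_node() < 0) now matches any requested node, and if nothing matches,
the first started device is used. A hypothetical caller asking for an
instance near the current CPU might look like this (numa_node_id() is the
standard helper; error handling elided):

	/* hypothetical caller, not taken from the patch */
	int node = numa_node_id();	/* NUMA node of the executing CPU */
	struct qat_crypto_instance *inst = qat_crypto_get_instance_node(node);

	if (!inst)
		return -ENODEV;	/* no started accelerator at all */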
@@ -164,7 +166,7 @@ static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev)
 
 	for (i = 0; i < num_inst; i++) {
 		inst = kzalloc_node(sizeof(*inst), GFP_KERNEL,
-				    accel_dev->numa_node);
+				    dev_to_node(&GET_DEV(accel_dev)));
 		if (!inst)
 			goto err;
 

--- a/drivers/crypto/qat/qat_dh895xcc/adf_admin.c
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_admin.c

@@ -108,7 +108,7 @@ int adf_init_admin_comms(struct adf_accel_dev *accel_dev)
 	uint64_t reg_val;
 
 	admin = kzalloc_node(sizeof(*accel_dev->admin), GFP_KERNEL,
-			     accel_dev->numa_node);
+			     dev_to_node(&GET_DEV(accel_dev)));
 	if (!admin)
 		return -ENOMEM;
 	admin->virt_addr = dma_zalloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE,

--- a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c

@@ -119,21 +119,6 @@ static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
 	kfree(accel_dev);
 }
 
-static uint8_t adf_get_dev_node_id(struct pci_dev *pdev)
-{
-	unsigned int bus_per_cpu = 0;
-	struct cpuinfo_x86 *c = &cpu_data(num_online_cpus() - 1);
-
-	if (!c->phys_proc_id)
-		return 0;
-
-	bus_per_cpu = 256 / (c->phys_proc_id + 1);
-
-	if (bus_per_cpu != 0)
-		return pdev->bus->number / bus_per_cpu;
-	return 0;
-}
-
 static int qat_dev_start(struct adf_accel_dev *accel_dev)
 {
 	int cpus = num_online_cpus();
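
For contrast with the deleted helper: it took the physical package id of
the last online CPU as (socket count - 1) and assumed the 256 possible PCI
bus numbers are split evenly across sockets, e.g. two sockets gives
bus_per_cpu = 256 / 2 = 128, so bus 0x80 and above maps to node 1. That
only holds for one particular firmware bus-numbering layout. The
replacement simply reads what the PCI core recorded:

	/* what the patch uses instead of the bus-number heuristic */
	int node = dev_to_node(&pdev->dev);	/* NUMA_NO_NODE if unknown */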
@@ -235,7 +220,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	void __iomem *pmisc_bar_addr = NULL;
 	char name[ADF_DEVICE_NAME_LENGTH];
 	unsigned int i, bar_nr;
-	uint8_t node;
 	int ret;
 
 	switch (ent->device) {
@@ -246,12 +230,19 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		return -ENODEV;
 	}
 
-	node = adf_get_dev_node_id(pdev);
-	accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL, node);
+	if (num_possible_nodes() > 1 && dev_to_node(&pdev->dev) < 0) {
+		/* If the accelerator is connected to a node with no memory
+		 * there is no point in using the accelerator since the remote
+		 * memory transaction will be very slow. */
+		dev_err(&pdev->dev, "Invalid NUMA configuration.\n");
+		return -EINVAL;
+	}
+
+	accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL,
+				 dev_to_node(&pdev->dev));
 	if (!accel_dev)
 		return -ENOMEM;
 
-	accel_dev->numa_node = node;
 	INIT_LIST_HEAD(&accel_dev->crypto_list);
 
 	/* Add accel device to accel table.
@@ -264,7 +255,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	accel_dev->owner = THIS_MODULE;
 
 	/* Allocate and configure device configuration structure */
-	hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL, node);
+	hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL,
+			       dev_to_node(&pdev->dev));
 	if (!hw_data) {
 		ret = -ENOMEM;
 		goto out_err;

--- a/drivers/crypto/qat/qat_dh895xcc/adf_isr.c
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_isr.c

@@ -168,7 +168,7 @@ static int adf_isr_alloc_msix_entry_table(struct adf_accel_dev *accel_dev)
 	uint32_t msix_num_entries = hw_data->num_banks + 1;
 
 	entries = kzalloc_node(msix_num_entries * sizeof(*entries),
-			       GFP_KERNEL, accel_dev->numa_node);
+			       GFP_KERNEL, dev_to_node(&GET_DEV(accel_dev)));
 	if (!entries)
 		return -ENOMEM;
 