When NX encounters a translation error on a CRB or any request buffer, it raises an interrupt on the CPU to handle the fault. A single interrupt may cover multiple faults. NX expects the OS to handle these faults and return credits for the fault window after processing them.

Set up a threaded IRQ handler and an IRQ thread function per VAS instance. The IRQ handler checks whether the thread is already awake and able to handle new faults; if so it returns IRQ_HANDLED, otherwise it wakes the thread to process the new faults. The thread function reads each CRB entry from the fault FIFO until it sees an invalid entry. For each CRB it determines the corresponding send window using the pswid (from the CRB) and processes the fault CRB, then invalidates the entry and returns the credit. Processing the fault CRB and returning credits are described in subsequent patches.

Signed-off-by: Sukadev Bhattiprolu <sukadev@linux.vnet.ibm.com>
Signed-off-by: Haren Myneni <haren@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/1587016982.2275.1060.camel@hbabu-laptop
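The setup code below registers vas_fault_handler() and vas_fault_thread_fn() with request_threaded_irq(); their bodies are added in a subsequent patch. A minimal sketch of the flow described above, assuming a hypothetical next_fault_crb() helper, a hypothetical process_fault_crb() helper, and a fault_in_progress flag that are not part of the real struct vas_instance:

/* Illustrative sketch only -- the real handlers land in a later patch. */
static irqreturn_t vas_fault_handler(int irq, void *data)
{
	struct vas_instance *vinst = data;

	/*
	 * If the thread is already working through the fault FIFO it
	 * will pick up any new entries; otherwise ask the core to wake
	 * it. (vinst->fault_in_progress is an assumed flag.)
	 */
	if (READ_ONCE(vinst->fault_in_progress))
		return IRQ_HANDLED;

	return IRQ_WAKE_THREAD;
}

static irqreturn_t vas_fault_thread_fn(int irq, void *data)
{
	struct vas_instance *vinst = data;
	void *crb;

	/* Walk the fault FIFO until an invalid (empty) entry is seen. */
	while ((crb = next_fault_crb(vinst)) != NULL) {	/* hypothetical helper */
		/*
		 * Find the send window from the pswid in the CRB and
		 * process the fault, then invalidate the entry and
		 * return the fault-window credit (subsequent patches).
		 */
		process_fault_crb(vinst, crb);		/* hypothetical helper */
	}

	return IRQ_HANDLED;
}

Returning IRQ_WAKE_THREAD from the hard handler is what lets the core wake the thread function, so a single interrupt can cover multiple queued faults.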
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2016-17 IBM Corp.
 */

#define pr_fmt(fmt) "vas: " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/of.h>
#include <linux/irqdomain.h>
#include <linux/interrupt.h>
#include <asm/prom.h>
#include <asm/xive.h>

#include "vas.h"

DEFINE_MUTEX(vas_mutex);
static LIST_HEAD(vas_instances);

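/* VAS id of the instance on each CPU's chip; set in init_vas_instance(). */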
static DEFINE_PER_CPU(int, cpu_vas_id);

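/*
 * Register the threaded fault IRQ handlers for this instance and set up
 * the fault window through which NX reports translation faults. If the
 * fault window cannot be set up, release the IRQ again.
 */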
static int vas_irq_fault_window_setup(struct vas_instance *vinst)
{
	char devname[64];
	int rc = 0;

	snprintf(devname, sizeof(devname), "vas-%d", vinst->vas_id);
	rc = request_threaded_irq(vinst->virq, vas_fault_handler,
				vas_fault_thread_fn, 0, devname, vinst);

	if (rc) {
		pr_err("VAS[%d]: Request IRQ(%d) failed with %d\n",
				vinst->vas_id, vinst->virq, rc);
		goto out;
	}

	rc = vas_setup_fault_window(vinst);
	if (rc)
		free_irq(vinst->virq, vinst);

out:
	return rc;
}

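/*
 * Initialize one VAS instance from its device-tree node: record the
 * window context and paste BARs, allocate the fault interrupt and, for
 * user space send windows, set up fault handling.
 */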
static int init_vas_instance(struct platform_device *pdev)
{
	struct device_node *dn = pdev->dev.of_node;
	struct vas_instance *vinst;
	struct xive_irq_data *xd;
	uint32_t chipid, hwirq;
	struct resource *res;
	int rc, cpu, vasid;

	rc = of_property_read_u32(dn, "ibm,vas-id", &vasid);
	if (rc) {
		pr_err("No ibm,vas-id property for %s?\n", pdev->name);
		return -ENODEV;
	}

	rc = of_property_read_u32(dn, "ibm,chip-id", &chipid);
	if (rc) {
		pr_err("No ibm,chip-id property for %s?\n", pdev->name);
		return -ENODEV;
	}

	if (pdev->num_resources != 4) {
		pr_err("Unexpected DT configuration for [%s, %d]\n",
				pdev->name, vasid);
		return -ENODEV;
	}

	vinst = kzalloc(sizeof(*vinst), GFP_KERNEL);
	if (!vinst)
		return -ENOMEM;

	INIT_LIST_HEAD(&vinst->node);
	ida_init(&vinst->ida);
	mutex_init(&vinst->mutex);
	vinst->vas_id = vasid;
	vinst->pdev = pdev;

	res = &pdev->resource[0];
	vinst->hvwc_bar_start = res->start;

	res = &pdev->resource[1];
	vinst->uwc_bar_start = res->start;

	res = &pdev->resource[2];
	vinst->paste_base_addr = res->start;

	res = &pdev->resource[3];
	if (res->end > 62) {
		pr_err("Bad 'paste_win_id_shift' in DT, %llx\n", res->end);
		goto free_vinst;
	}

	vinst->paste_win_id_shift = 63 - res->end;

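	/*
	 * Allocate a chip-local interrupt for fault notification, map it,
	 * and save the XIVE trigger port; translation faults are reported
	 * through this interrupt (fault window setup follows below).
	 */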
	hwirq = xive_native_alloc_irq_on_chip(chipid);
	if (!hwirq) {
		pr_err("Inst%d: Unable to allocate global irq for chip %d\n",
				vinst->vas_id, chipid);
		return -ENOENT;
	}

	vinst->virq = irq_create_mapping(NULL, hwirq);
	if (!vinst->virq) {
		pr_err("Inst%d: Unable to map global irq %d\n",
				vinst->vas_id, hwirq);
		return -EINVAL;
	}

	xd = irq_get_handler_data(vinst->virq);
	if (!xd) {
		pr_err("Inst%d: Invalid virq %d\n",
				vinst->vas_id, vinst->virq);
		return -EINVAL;
	}

	vinst->irq_port = xd->trig_page;
	pr_devel("Initialized instance [%s, %d] paste_base 0x%llx paste_win_id_shift 0x%llx IRQ %d Port 0x%llx\n",
			pdev->name, vasid, vinst->paste_base_addr,
			vinst->paste_win_id_shift, vinst->virq,
			vinst->irq_port);

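	/* Associate this VAS instance with every CPU on the same chip. */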
	for_each_possible_cpu(cpu) {
		if (cpu_to_chip_id(cpu) == of_get_ibm_chip_id(dn))
			per_cpu(cpu_vas_id, cpu) = vasid;
	}

	mutex_lock(&vas_mutex);
	list_add(&vinst->node, &vas_instances);
	mutex_unlock(&vas_mutex);

	spin_lock_init(&vinst->fault_lock);
	/*
	 * IRQ and fault handling setup is needed only for user space
	 * send windows.
	 */
	if (vinst->virq) {
		rc = vas_irq_fault_window_setup(vinst);
		/*
		 * Fault window is used only for user space send windows.
		 * So if vinst->virq is NULL, tx_win_open returns -ENODEV
		 * for user space.
		 */
		if (rc)
			vinst->virq = 0;
	}

	vas_instance_init_dbgdir(vinst);

	dev_set_drvdata(&pdev->dev, vinst);

	return 0;

free_vinst:
	kfree(vinst);
	return -ENODEV;
}

/*
 * Although this is read/used multiple times, it is written to only
 * during initialization.
 */
struct vas_instance *find_vas_instance(int vasid)
{
	struct list_head *ent;
	struct vas_instance *vinst;

	mutex_lock(&vas_mutex);

	if (vasid == -1)
		vasid = per_cpu(cpu_vas_id, smp_processor_id());

	list_for_each(ent, &vas_instances) {
		vinst = list_entry(ent, struct vas_instance, node);
		if (vinst->vas_id == vasid) {
			mutex_unlock(&vas_mutex);
			return vinst;
		}
	}
	mutex_unlock(&vas_mutex);

	pr_devel("Instance %d not found\n", vasid);
	return NULL;
}

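/* Return the vas id of the VAS instance on the given chip, or -1 if none. */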
int chip_to_vas_id(int chipid)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		if (cpu_to_chip_id(cpu) == chipid)
			return per_cpu(cpu_vas_id, cpu);
	}
	return -1;
}
EXPORT_SYMBOL(chip_to_vas_id);

static int vas_probe(struct platform_device *pdev)
{
	return init_vas_instance(pdev);
}

static const struct of_device_id powernv_vas_match[] = {
	{ .compatible = "ibm,vas",},
	{},
};

static struct platform_driver vas_driver = {
	.driver = {
		.name = "vas",
		.of_match_table = powernv_vas_match,
	},
	.probe = vas_probe,
};

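/*
 * Register the platform driver and create a platform device for each
 * "ibm,vas" node found in the device tree.
 */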
static int __init vas_init(void)
{
	int found = 0;
	struct device_node *dn;

	platform_driver_register(&vas_driver);

	for_each_compatible_node(dn, NULL, "ibm,vas") {
		of_platform_device_create(dn, NULL, NULL);
		found++;
	}

	if (!found) {
		platform_driver_unregister(&vas_driver);
		return -ENODEV;
	}

	pr_devel("Found %d instances\n", found);

	return 0;
}
device_initcall(vas_init);