drm/nouveau/intr: add shared interrupt plumbing between pci/tegra
Unifies the handling between PCI-based and Tegra GPUs, and makes more explicit/obvious where device interrupts can be expected. Signed-off-by: Ben Skeggs <bskeggs@redhat.com> Reviewed-by: Lyude Paul <lyude@redhat.com>
This commit is contained in:
parent
eec3f6dfed
commit
727fd72f24
@ -2,6 +2,7 @@
|
||||
#ifndef __NVKM_DEVICE_H__
|
||||
#define __NVKM_DEVICE_H__
|
||||
#include <core/oclass.h>
|
||||
#include <core/intr.h>
|
||||
enum nvkm_subdev_type;
|
||||
|
||||
enum nvkm_device_type {
|
||||
@ -60,6 +61,13 @@ struct nvkm_device {
|
||||
#undef NVKM_LAYOUT_INST
|
||||
#undef NVKM_LAYOUT_ONCE
|
||||
struct list_head subdev;
|
||||
|
||||
struct {
|
||||
spinlock_t lock;
|
||||
int irq;
|
||||
bool alloc;
|
||||
bool armed;
|
||||
} intr;
|
||||
};
|
||||
|
||||
struct nvkm_subdev *nvkm_device_subdev(struct nvkm_device *, int type, int inst);
|
||||
@ -72,6 +80,7 @@ struct nvkm_device_func {
|
||||
int (*preinit)(struct nvkm_device *);
|
||||
int (*init)(struct nvkm_device *);
|
||||
void (*fini)(struct nvkm_device *, bool suspend);
|
||||
int (*irq)(struct nvkm_device *);
|
||||
resource_size_t (*resource_addr)(struct nvkm_device *, unsigned bar);
|
||||
resource_size_t (*resource_size)(struct nvkm_device *, unsigned bar);
|
||||
bool cpu_coherent;
|
||||
|
12
drivers/gpu/drm/nouveau/include/nvkm/core/intr.h
Normal file
12
drivers/gpu/drm/nouveau/include/nvkm/core/intr.h
Normal file
@ -0,0 +1,12 @@
|
||||
/* SPDX-License-Identifier: MIT */
|
||||
#ifndef __NVKM_INTR_H__
|
||||
#define __NVKM_INTR_H__
|
||||
#include <core/os.h>
|
||||
struct nvkm_device;
|
||||
|
||||
void nvkm_intr_ctor(struct nvkm_device *);
|
||||
void nvkm_intr_dtor(struct nvkm_device *);
|
||||
int nvkm_intr_install(struct nvkm_device *);
|
||||
void nvkm_intr_unarm(struct nvkm_device *);
|
||||
void nvkm_intr_rearm(struct nvkm_device *);
|
||||
#endif
|
@ -8,7 +8,6 @@ struct nvkm_device_tegra {
|
||||
const struct nvkm_device_tegra_func *func;
|
||||
struct nvkm_device device;
|
||||
struct platform_device *pdev;
|
||||
int irq;
|
||||
|
||||
struct reset_control *rst;
|
||||
struct clk *clk;
|
||||
|
@ -13,7 +13,6 @@ struct nvkm_pci {
|
||||
const struct nvkm_pci_func *func;
|
||||
struct nvkm_subdev subdev;
|
||||
struct pci_dev *pdev;
|
||||
int irq;
|
||||
|
||||
struct {
|
||||
struct agp_bridge_data *bridge;
|
||||
@ -38,6 +37,7 @@ void nvkm_pci_wr08(struct nvkm_pci *, u16 addr, u8 data);
|
||||
void nvkm_pci_wr32(struct nvkm_pci *, u16 addr, u32 data);
|
||||
u32 nvkm_pci_mask(struct nvkm_pci *, u16 addr, u32 mask, u32 value);
|
||||
void nvkm_pci_rom_shadow(struct nvkm_pci *, bool shadow);
|
||||
void nvkm_pci_msi_rearm(struct nvkm_device *);
|
||||
|
||||
int nv04_pci_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pci **);
|
||||
int nv40_pci_new(struct nvkm_device *, enum nvkm_subdev_type, int inst, struct nvkm_pci **);
|
||||
|
@ -5,6 +5,7 @@ nvkm-y += nvkm/core/enum.o
|
||||
nvkm-y += nvkm/core/event.o
|
||||
nvkm-y += nvkm/core/firmware.o
|
||||
nvkm-y += nvkm/core/gpuobj.o
|
||||
nvkm-y += nvkm/core/intr.o
|
||||
nvkm-y += nvkm/core/ioctl.o
|
||||
nvkm-y += nvkm/core/memory.o
|
||||
nvkm-y += nvkm/core/mm.o
|
||||
|
109
drivers/gpu/drm/nouveau/nvkm/core/intr.c
Normal file
109
drivers/gpu/drm/nouveau/nvkm/core/intr.c
Normal file
@ -0,0 +1,109 @@
|
||||
/*
|
||||
* Copyright 2021 Red Hat Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*/
|
||||
#include <core/intr.h>
|
||||
|
||||
#include <subdev/pci.h>
|
||||
#include <subdev/mc.h>
|
||||
|
||||
/* Re-enable top-level interrupt generation.  Caller holds device->intr.lock. */
static void
nvkm_intr_rearm_locked(struct nvkm_device *device)
{
	nvkm_mc_intr_rearm(device);
}
|
||||
|
||||
/* Disable top-level interrupt generation.  Caller holds device->intr.lock. */
static void
nvkm_intr_unarm_locked(struct nvkm_device *device)
{
	nvkm_mc_intr_unarm(device);
}
|
||||
|
||||
static irqreturn_t
|
||||
nvkm_intr(int irq, void *arg)
|
||||
{
|
||||
struct nvkm_device *device = arg;
|
||||
irqreturn_t ret = IRQ_NONE;
|
||||
bool handled;
|
||||
|
||||
spin_lock(&device->intr.lock);
|
||||
if (!device->intr.armed)
|
||||
goto done_unlock;
|
||||
|
||||
nvkm_intr_unarm_locked(device);
|
||||
nvkm_pci_msi_rearm(device);
|
||||
|
||||
nvkm_mc_intr(device, &handled);
|
||||
if (handled)
|
||||
ret = IRQ_HANDLED;
|
||||
|
||||
nvkm_intr_rearm_locked(device);
|
||||
done_unlock:
|
||||
spin_unlock(&device->intr.lock);
|
||||
return ret;
|
||||
}
|
||||
|
||||
void
|
||||
nvkm_intr_rearm(struct nvkm_device *device)
|
||||
{
|
||||
spin_lock_irq(&device->intr.lock);
|
||||
nvkm_intr_rearm_locked(device);
|
||||
device->intr.armed = true;
|
||||
spin_unlock_irq(&device->intr.lock);
|
||||
}
|
||||
|
||||
void
|
||||
nvkm_intr_unarm(struct nvkm_device *device)
|
||||
{
|
||||
spin_lock_irq(&device->intr.lock);
|
||||
nvkm_intr_unarm_locked(device);
|
||||
device->intr.armed = false;
|
||||
spin_unlock_irq(&device->intr.lock);
|
||||
}
|
||||
|
||||
int
|
||||
nvkm_intr_install(struct nvkm_device *device)
|
||||
{
|
||||
int ret;
|
||||
|
||||
device->intr.irq = device->func->irq(device);
|
||||
if (device->intr.irq < 0)
|
||||
return device->intr.irq;
|
||||
|
||||
ret = request_irq(device->intr.irq, nvkm_intr, IRQF_SHARED, "nvkm", device);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
device->intr.alloc = true;
|
||||
return 0;
|
||||
}
|
||||
|
||||
void
|
||||
nvkm_intr_dtor(struct nvkm_device *device)
|
||||
{
|
||||
if (device->intr.alloc)
|
||||
free_irq(device->intr.irq, device);
|
||||
}
|
||||
|
||||
void
|
||||
nvkm_intr_ctor(struct nvkm_device *device)
|
||||
{
|
||||
spin_lock_init(&device->intr.lock);
|
||||
}
|
@ -2734,6 +2734,8 @@ nvkm_device_fini(struct nvkm_device *device, bool suspend)
|
||||
if (device->func->fini)
|
||||
device->func->fini(device, suspend);
|
||||
|
||||
nvkm_intr_unarm(device);
|
||||
|
||||
time = ktime_to_us(ktime_get()) - time;
|
||||
nvdev_trace(device, "%s completed in %lldus...\n", action, time);
|
||||
return 0;
|
||||
@ -2759,6 +2761,8 @@ nvkm_device_preinit(struct nvkm_device *device)
|
||||
nvdev_trace(device, "preinit running...\n");
|
||||
time = ktime_to_us(ktime_get());
|
||||
|
||||
nvkm_intr_unarm(device);
|
||||
|
||||
if (device->func->preinit) {
|
||||
ret = device->func->preinit(device);
|
||||
if (ret)
|
||||
@ -2804,6 +2808,8 @@ nvkm_device_init(struct nvkm_device *device)
|
||||
nvdev_trace(device, "init running...\n");
|
||||
time = ktime_to_us(ktime_get());
|
||||
|
||||
nvkm_intr_rearm(device);
|
||||
|
||||
if (device->func->init) {
|
||||
ret = device->func->init(device);
|
||||
if (ret)
|
||||
@ -2841,6 +2847,8 @@ nvkm_device_del(struct nvkm_device **pdevice)
|
||||
if (device) {
|
||||
mutex_lock(&nv_devices_mutex);
|
||||
|
||||
nvkm_intr_dtor(device);
|
||||
|
||||
list_for_each_entry_safe_reverse(subdev, subtmp, &device->subdev, head)
|
||||
nvkm_subdev_del(&subdev);
|
||||
|
||||
@ -3148,6 +3156,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
|
||||
device->name = device->chip->name;
|
||||
|
||||
mutex_init(&device->mutex);
|
||||
nvkm_intr_ctor(device);
|
||||
|
||||
#define NVKM_LAYOUT_ONCE(type,data,ptr) \
|
||||
if (device->chip->ptr.inst && (subdev_mask & (BIT_ULL(type)))) { \
|
||||
@ -3189,7 +3198,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
|
||||
#undef NVKM_LAYOUT_INST
|
||||
#undef NVKM_LAYOUT_ONCE
|
||||
|
||||
ret = 0;
|
||||
ret = nvkm_intr_install(device);
|
||||
done:
|
||||
if (device->pri && (!mmio || ret)) {
|
||||
iounmap(device->pri);
|
||||
|
@ -1574,6 +1574,12 @@ nvkm_device_pci_resource_size(struct nvkm_device *device, unsigned bar)
|
||||
return pci_resource_len(pdev->pdev, bar);
|
||||
}
|
||||
|
||||
static int
|
||||
nvkm_device_pci_irq(struct nvkm_device *device)
|
||||
{
|
||||
return nvkm_device_pci(device)->pdev->irq;
|
||||
}
|
||||
|
||||
static void
|
||||
nvkm_device_pci_fini(struct nvkm_device *device, bool suspend)
|
||||
{
|
||||
@ -1612,6 +1618,7 @@ nvkm_device_pci_func = {
|
||||
.dtor = nvkm_device_pci_dtor,
|
||||
.preinit = nvkm_device_pci_preinit,
|
||||
.fini = nvkm_device_pci_fini,
|
||||
.irq = nvkm_device_pci_irq,
|
||||
.resource_addr = nvkm_device_pci_resource_addr,
|
||||
.resource_size = nvkm_device_pci_resource_size,
|
||||
.cpu_coherent = !IS_ENABLED(CONFIG_ARM),
|
||||
|
@ -206,45 +206,12 @@ nvkm_device_tegra_resource_size(struct nvkm_device *device, unsigned bar)
|
||||
return res ? resource_size(res) : 0;
|
||||
}
|
||||
|
||||
static irqreturn_t
|
||||
nvkm_device_tegra_intr(int irq, void *arg)
|
||||
{
|
||||
struct nvkm_device_tegra *tdev = arg;
|
||||
struct nvkm_device *device = &tdev->device;
|
||||
bool handled = false;
|
||||
nvkm_mc_intr_unarm(device);
|
||||
nvkm_mc_intr(device, &handled);
|
||||
nvkm_mc_intr_rearm(device);
|
||||
return handled ? IRQ_HANDLED : IRQ_NONE;
|
||||
}
|
||||
|
||||
static void
|
||||
nvkm_device_tegra_fini(struct nvkm_device *device, bool suspend)
|
||||
{
|
||||
struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
|
||||
if (tdev->irq) {
|
||||
free_irq(tdev->irq, tdev);
|
||||
tdev->irq = 0;
|
||||
}
|
||||
}
|
||||
|
||||
static int
|
||||
nvkm_device_tegra_init(struct nvkm_device *device)
|
||||
nvkm_device_tegra_irq(struct nvkm_device *device)
|
||||
{
|
||||
struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);
|
||||
int irq, ret;
|
||||
|
||||
irq = platform_get_irq_byname(tdev->pdev, "stall");
|
||||
if (irq < 0)
|
||||
return irq;
|
||||
|
||||
ret = request_irq(irq, nvkm_device_tegra_intr,
|
||||
IRQF_SHARED, "nvkm", tdev);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
tdev->irq = irq;
|
||||
return 0;
|
||||
return platform_get_irq_byname(tdev->pdev, "stall");
|
||||
}
|
||||
|
||||
static void *
|
||||
@ -260,8 +227,7 @@ static const struct nvkm_device_func
|
||||
nvkm_device_tegra_func = {
|
||||
.tegra = nvkm_device_tegra,
|
||||
.dtor = nvkm_device_tegra_dtor,
|
||||
.init = nvkm_device_tegra_init,
|
||||
.fini = nvkm_device_tegra_fini,
|
||||
.irq = nvkm_device_tegra_irq,
|
||||
.resource_addr = nvkm_device_tegra_resource_addr,
|
||||
.resource_size = nvkm_device_tegra_resource_size,
|
||||
.cpu_coherent = false,
|
||||
|
@ -26,7 +26,15 @@
|
||||
|
||||
#include <core/option.h>
|
||||
#include <core/pci.h>
|
||||
#include <subdev/mc.h>
|
||||
|
||||
void
|
||||
nvkm_pci_msi_rearm(struct nvkm_device *device)
|
||||
{
|
||||
struct nvkm_pci *pci = device->pci;
|
||||
|
||||
if (pci && pci->msi)
|
||||
pci->func->msi_rearm(pci);
|
||||
}
|
||||
|
||||
u32
|
||||
nvkm_pci_rd32(struct nvkm_pci *pci, u16 addr)
|
||||
@ -65,24 +73,6 @@ nvkm_pci_rom_shadow(struct nvkm_pci *pci, bool shadow)
|
||||
nvkm_pci_wr32(pci, 0x0050, data);
|
||||
}
|
||||
|
||||
static irqreturn_t
|
||||
nvkm_pci_intr(int irq, void *arg)
|
||||
{
|
||||
struct nvkm_pci *pci = arg;
|
||||
struct nvkm_device *device = pci->subdev.device;
|
||||
bool handled = false;
|
||||
|
||||
if (pci->irq < 0)
|
||||
return IRQ_HANDLED;
|
||||
|
||||
nvkm_mc_intr_unarm(device);
|
||||
if (pci->msi)
|
||||
pci->func->msi_rearm(pci);
|
||||
nvkm_mc_intr(device, &handled);
|
||||
nvkm_mc_intr_rearm(device);
|
||||
return handled ? IRQ_HANDLED : IRQ_NONE;
|
||||
}
|
||||
|
||||
static int
|
||||
nvkm_pci_fini(struct nvkm_subdev *subdev, bool suspend)
|
||||
{
|
||||
@ -107,7 +97,6 @@ static int
|
||||
nvkm_pci_oneinit(struct nvkm_subdev *subdev)
|
||||
{
|
||||
struct nvkm_pci *pci = nvkm_pci(subdev);
|
||||
struct pci_dev *pdev = pci->pdev;
|
||||
int ret;
|
||||
|
||||
if (pci_is_pcie(pci->pdev)) {
|
||||
@ -116,11 +105,6 @@ nvkm_pci_oneinit(struct nvkm_subdev *subdev)
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = request_irq(pdev->irq, nvkm_pci_intr, IRQF_SHARED, "nvkm", pci);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
pci->irq = pdev->irq;
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -157,15 +141,6 @@ nvkm_pci_dtor(struct nvkm_subdev *subdev)
|
||||
|
||||
nvkm_agp_dtor(pci);
|
||||
|
||||
if (pci->irq >= 0) {
|
||||
/* free_irq() will call the handler, we use pci->irq == -1
|
||||
* to signal that it's been torn down and should be a noop.
|
||||
*/
|
||||
int irq = pci->irq;
|
||||
pci->irq = -1;
|
||||
free_irq(irq, pci);
|
||||
}
|
||||
|
||||
if (pci->msi)
|
||||
pci_disable_msi(pci->pdev);
|
||||
|
||||
@ -192,7 +167,6 @@ nvkm_pci_new_(const struct nvkm_pci_func *func, struct nvkm_device *device,
|
||||
nvkm_subdev_ctor(&nvkm_pci_func, device, type, inst, &pci->subdev);
|
||||
pci->func = func;
|
||||
pci->pdev = device->func->pci(device)->pdev;
|
||||
pci->irq = -1;
|
||||
pci->pcie.speed = -1;
|
||||
pci->pcie.width = -1;
|
||||
|
||||
|
Loading…
x
Reference in New Issue
Block a user