staging: gasket: remove it from the kernel

As none of the proposed things that need to be changed in the gasket
drivers have ever been done, and there has not been any forward progress
to get this out of staging, it seems totally abandoned, so remove the
code entirely so that people do not spend their time doing tiny cleanups
for code that will never get out of staging.

If this code is actually being used, it can be reverted simply and then
cleaned up properly, but as it is abandoned, let's just get rid of it.

Cc: Todd Poynor <toddpoynor@google.com>
Cc: Ben Chan <benchan@chromium.org>
Cc: Richard Yeh <rcy@google.com>
Acked-by: Rob Springer <rspringer@google.com>
Link: https://lore.kernel.org/r/20210315154413.3084149-1-gregkh@linuxfoundation.org
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Greg Kroah-Hartman 2021-03-15 16:44:13 +01:00
parent 0257aec21b
commit 918ce05bbe
20 changed files with 0 additions and 6648 deletions

@@ -7385,14 +7385,6 @@ F: Documentation/hwmon/gsc-hwmon.rst
F: drivers/hwmon/gsc-hwmon.c
F: include/linux/platform_data/gsc_hwmon.h
GASKET DRIVER FRAMEWORK
M: Rob Springer <rspringer@google.com>
M: Todd Poynor <toddpoynor@google.com>
M: Ben Chan <benchan@chromium.org>
M: Richard Yeh <rcy@google.com>
S: Maintained
F: drivers/staging/gasket/
GCC PLUGINS
M: Kees Cook <keescook@chromium.org>
L: linux-hardening@vger.kernel.org

@@ -98,8 +98,6 @@ source "drivers/staging/ralink-gdma/Kconfig"
source "drivers/staging/mt7621-dts/Kconfig"
source "drivers/staging/gasket/Kconfig"
source "drivers/staging/axis-fifo/Kconfig"
source "drivers/staging/fieldbus/Kconfig"

@@ -39,7 +39,6 @@ obj-$(CONFIG_PCI_MT7621) += mt7621-pci/
obj-$(CONFIG_SOC_MT7621) += mt7621-dma/
obj-$(CONFIG_DMA_RALINK) += ralink-gdma/
obj-$(CONFIG_SOC_MT7621) += mt7621-dts/
obj-$(CONFIG_STAGING_GASKET_FRAMEWORK) += gasket/
obj-$(CONFIG_XIL_AXIS_FIFO) += axis-fifo/
obj-$(CONFIG_FIELDBUS_DEV) += fieldbus/
obj-$(CONFIG_KPC2000) += kpc2000/

@@ -1,25 +0,0 @@
# SPDX-License-Identifier: GPL-2.0
menu "Gasket devices"
config STAGING_GASKET_FRAMEWORK
tristate "Gasket framework"
depends on PCI && (X86_64 || ARM64)
help
This framework supports Gasket-compatible devices, such as Apex.
It is required for any of the following module(s).
To compile this driver as a module, choose M here. The module
will be called "gasket".
config STAGING_APEX_DRIVER
tristate "Apex Driver"
depends on STAGING_GASKET_FRAMEWORK
help
This driver supports the Apex Edge TPU device. See
https://cloud.google.com/edge-tpu/ for more information.
Say Y if you want to include this driver in the kernel.
To compile this driver as a module, choose M here. The module
will be called "apex".
endmenu

@@ -1,10 +0,0 @@
# SPDX-License-Identifier: GPL-2.0
#
# Makefile for Gasket framework and dependent drivers.
#
obj-$(CONFIG_STAGING_GASKET_FRAMEWORK) += gasket.o
obj-$(CONFIG_STAGING_APEX_DRIVER) += apex.o
gasket-objs := gasket_core.o gasket_ioctl.o gasket_interrupt.o gasket_page_table.o gasket_sysfs.o
apex-objs := apex_driver.o

@@ -1,22 +0,0 @@
This is a list of things that need to be done to get this driver out of the
staging directory.
- Implement the gasket framework's functionality through UIO instead of
introducing a new user-space drivers framework that is quite similar.
UIO provides the necessary bits to implement user-space drivers. Meanwhile
the gasket APIs add some extra conveniences like PCI BAR mapping and
MSI interrupts. Add these features to the UIO subsystem, then re-implement
the Apex driver as a basic UIO driver instead (include/linux/uio_driver.h);
a minimal UIO sketch follows this list.
- Document sysfs files with Documentation/ABI/ entries.
- Use misc interface instead of major number for driver version description.
- Add descriptions of module_param's
- apex_get_status() should actually check status.
- "drivers" should never be dealing with "raw" sysfs calls or mess around with
kobjects at all. The driver core should handle all of this for you
automatically. There should not be a need for raw attribute macros.
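For illustration only (this sketch is not part of the removed code): a minimal idea of what the UIO-based replacement suggested above could look like, built on the in-tree include/linux/uio_driver.h API. The driver name "apex_uio", the choice of BAR 2, and the reuse of the PCI IDs from apex_driver.c below are assumptions made for the example, not a proposed implementation.

// SPDX-License-Identifier: GPL-2.0
/* Illustrative sketch only: expose the Apex CSR BAR to user space via UIO. */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/uio_driver.h>

static struct uio_info apex_uio_info = {
	.name = "apex_uio",	/* illustrative name */
	.version = "0.1",
};

static int apex_uio_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int ret = pci_enable_device(pdev);

	if (ret)
		return ret;

	/* Expose BAR 2 (the CSR BAR used by apex_driver.c) for mmap(). */
	apex_uio_info.mem[0].name = "csr";
	apex_uio_info.mem[0].addr = pci_resource_start(pdev, 2);
	apex_uio_info.mem[0].size = pci_resource_len(pdev, 2);
	apex_uio_info.mem[0].memtype = UIO_MEM_PHYS;

	ret = uio_register_device(&pdev->dev, &apex_uio_info);
	if (ret)
		pci_disable_device(pdev);
	return ret;
}

static void apex_uio_remove(struct pci_dev *pdev)
{
	uio_unregister_device(&apex_uio_info);
	pci_disable_device(pdev);
}

static const struct pci_device_id apex_uio_ids[] = {
	{ PCI_DEVICE(0x1ac1, 0x089a) },	/* IDs taken from apex_driver.c below */
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, apex_uio_ids);

static struct pci_driver apex_uio_driver = {
	.name = "apex_uio",
	.id_table = apex_uio_ids,
	.probe = apex_uio_probe,
	.remove = apex_uio_remove,
};
module_pci_driver(apex_uio_driver);

MODULE_LICENSE("GPL v2");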

@@ -1,30 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Apex kernel-userspace interface definitions.
*
* Copyright (C) 2018 Google, Inc.
*/
#ifndef __APEX_H__
#define __APEX_H__
#include <linux/ioctl.h>
/* Clock Gating ioctl. */
struct apex_gate_clock_ioctl {
/* Enter or leave clock gated state. */
u64 enable;
/* If set, enter clock gating state, regardless of custom block's
* internal idle state
*/
u64 force_idle;
};
/* Base number for all Apex-common IOCTLs */
#define APEX_IOCTL_BASE 0x7F
/* Enable/Disable clock gating. */
#define APEX_IOCTL_GATE_CLOCK \
_IOW(APEX_IOCTL_BASE, 0, struct apex_gate_clock_ioctl)
#endif /* __APEX_H__ */
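For illustration only (not part of the removed code): a hypothetical user-space caller showing how this clock-gating ioctl was intended to be invoked. The /dev/apex_0 node name is an assumption (the gasket framework names nodes after the driver name plus a device index), and because the header uses the kernel-only u64 type, a real user-space build would need a sanitized copy using uint64_t.

/* Hypothetical user-space sketch; not from the original commit. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "apex.h"	/* assumes a user-space-safe copy of this header */

int main(void)
{
	struct apex_gate_clock_ioctl req = { .enable = 1, .force_idle = 0 };
	int fd = open("/dev/apex_0", O_RDWR);	/* assumed device node name */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, APEX_IOCTL_GATE_CLOCK, &req))
		perror("APEX_IOCTL_GATE_CLOCK");
	close(fd);
	return 0;
}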

@@ -1,726 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Driver for the Apex chip.
*
* Copyright (C) 2018 Google, Inc.
*/
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include "apex.h"
#include "gasket_core.h"
#include "gasket_interrupt.h"
#include "gasket_page_table.h"
#include "gasket_sysfs.h"
/* Constants */
#define APEX_DEVICE_NAME "Apex"
#define APEX_DRIVER_VERSION "1.0"
/* CSRs are in BAR 2. */
#define APEX_BAR_INDEX 2
#define APEX_PCI_VENDOR_ID 0x1ac1
#define APEX_PCI_DEVICE_ID 0x089a
/* Bar Offsets. */
#define APEX_BAR_OFFSET 0
#define APEX_CM_OFFSET 0x1000000
/* The sizes of each Apex BAR 2. */
#define APEX_BAR_BYTES 0x100000
#define APEX_CH_MEM_BYTES (PAGE_SIZE * MAX_NUM_COHERENT_PAGES)
/* The number of user-mappable memory ranges in BAR2 of an Apex chip. */
#define NUM_REGIONS 3
/* The number of nodes in an Apex chip. */
#define NUM_NODES 1
/*
* The total number of entries in the page table. Should match the value read
* from the register APEX_BAR2_REG_KERNEL_HIB_PAGE_TABLE_SIZE.
*/
#define APEX_PAGE_TABLE_TOTAL_ENTRIES 8192
#define APEX_EXTENDED_SHIFT 63 /* Extended address bit position. */
/* Check reset 120 times */
#define APEX_RESET_RETRY 120
/* Wait 100 ms between checks. Total 12 sec wait maximum. */
#define APEX_RESET_DELAY 100
/* Enumeration of the supported sysfs entries. */
enum sysfs_attribute_type {
ATTR_KERNEL_HIB_PAGE_TABLE_SIZE,
ATTR_KERNEL_HIB_SIMPLE_PAGE_TABLE_SIZE,
ATTR_KERNEL_HIB_NUM_ACTIVE_PAGES,
};
/*
* Register offsets into BAR2 memory.
* Only values necessary for driver implementation are defined.
*/
enum apex_bar2_regs {
APEX_BAR2_REG_SCU_BASE = 0x1A300,
APEX_BAR2_REG_KERNEL_HIB_PAGE_TABLE_SIZE = 0x46000,
APEX_BAR2_REG_KERNEL_HIB_EXTENDED_TABLE = 0x46008,
APEX_BAR2_REG_KERNEL_HIB_TRANSLATION_ENABLE = 0x46010,
APEX_BAR2_REG_KERNEL_HIB_INSTR_QUEUE_INTVECCTL = 0x46018,
APEX_BAR2_REG_KERNEL_HIB_INPUT_ACTV_QUEUE_INTVECCTL = 0x46020,
APEX_BAR2_REG_KERNEL_HIB_PARAM_QUEUE_INTVECCTL = 0x46028,
APEX_BAR2_REG_KERNEL_HIB_OUTPUT_ACTV_QUEUE_INTVECCTL = 0x46030,
APEX_BAR2_REG_KERNEL_HIB_SC_HOST_INTVECCTL = 0x46038,
APEX_BAR2_REG_KERNEL_HIB_TOP_LEVEL_INTVECCTL = 0x46040,
APEX_BAR2_REG_KERNEL_HIB_FATAL_ERR_INTVECCTL = 0x46048,
APEX_BAR2_REG_KERNEL_HIB_DMA_PAUSE = 0x46050,
APEX_BAR2_REG_KERNEL_HIB_DMA_PAUSE_MASK = 0x46058,
APEX_BAR2_REG_KERNEL_HIB_STATUS_BLOCK_DELAY = 0x46060,
APEX_BAR2_REG_KERNEL_HIB_MSIX_PENDING_BIT_ARRAY0 = 0x46068,
APEX_BAR2_REG_KERNEL_HIB_MSIX_PENDING_BIT_ARRAY1 = 0x46070,
APEX_BAR2_REG_KERNEL_HIB_PAGE_TABLE_INIT = 0x46078,
APEX_BAR2_REG_KERNEL_HIB_MSIX_TABLE_INIT = 0x46080,
APEX_BAR2_REG_KERNEL_WIRE_INT_PENDING_BIT_ARRAY = 0x48778,
APEX_BAR2_REG_KERNEL_WIRE_INT_MASK_ARRAY = 0x48780,
APEX_BAR2_REG_USER_HIB_DMA_PAUSE = 0x486D8,
APEX_BAR2_REG_USER_HIB_DMA_PAUSED = 0x486E0,
APEX_BAR2_REG_IDLEGENERATOR_IDLEGEN_IDLEREGISTER = 0x4A000,
APEX_BAR2_REG_KERNEL_HIB_PAGE_TABLE = 0x50000,
/* Error registers - Used mostly for debug */
APEX_BAR2_REG_USER_HIB_ERROR_STATUS = 0x86f0,
APEX_BAR2_REG_SCALAR_CORE_ERROR_STATUS = 0x41a0,
};
/* Addresses for packed registers. */
#define APEX_BAR2_REG_AXI_QUIESCE (APEX_BAR2_REG_SCU_BASE + 0x2C)
#define APEX_BAR2_REG_GCB_CLOCK_GATE (APEX_BAR2_REG_SCU_BASE + 0x14)
#define APEX_BAR2_REG_SCU_0 (APEX_BAR2_REG_SCU_BASE + 0xc)
#define APEX_BAR2_REG_SCU_1 (APEX_BAR2_REG_SCU_BASE + 0x10)
#define APEX_BAR2_REG_SCU_2 (APEX_BAR2_REG_SCU_BASE + 0x14)
#define APEX_BAR2_REG_SCU_3 (APEX_BAR2_REG_SCU_BASE + 0x18)
#define APEX_BAR2_REG_SCU_4 (APEX_BAR2_REG_SCU_BASE + 0x1c)
#define APEX_BAR2_REG_SCU_5 (APEX_BAR2_REG_SCU_BASE + 0x20)
#define SCU3_RG_PWR_STATE_OVR_BIT_OFFSET 26
#define SCU3_RG_PWR_STATE_OVR_MASK_WIDTH 2
#define SCU3_CUR_RST_GCB_BIT_MASK 0x10
#define SCU2_RG_RST_GCB_BIT_MASK 0xc
/* Configuration for page table. */
static struct gasket_page_table_config apex_page_table_configs[NUM_NODES] = {
{
.id = 0,
.mode = GASKET_PAGE_TABLE_MODE_NORMAL,
.total_entries = APEX_PAGE_TABLE_TOTAL_ENTRIES,
.base_reg = APEX_BAR2_REG_KERNEL_HIB_PAGE_TABLE,
.extended_reg = APEX_BAR2_REG_KERNEL_HIB_EXTENDED_TABLE,
.extended_bit = APEX_EXTENDED_SHIFT,
},
};
/* The regions in the BAR2 space that can be mapped into user space. */
static const struct gasket_mappable_region mappable_regions[NUM_REGIONS] = {
{ 0x40000, 0x1000 },
{ 0x44000, 0x1000 },
{ 0x48000, 0x1000 },
};
/* Gasket device interrupts enums must be dense (i.e., no empty slots). */
enum apex_interrupt {
APEX_INTERRUPT_INSTR_QUEUE = 0,
APEX_INTERRUPT_INPUT_ACTV_QUEUE = 1,
APEX_INTERRUPT_PARAM_QUEUE = 2,
APEX_INTERRUPT_OUTPUT_ACTV_QUEUE = 3,
APEX_INTERRUPT_SC_HOST_0 = 4,
APEX_INTERRUPT_SC_HOST_1 = 5,
APEX_INTERRUPT_SC_HOST_2 = 6,
APEX_INTERRUPT_SC_HOST_3 = 7,
APEX_INTERRUPT_TOP_LEVEL_0 = 8,
APEX_INTERRUPT_TOP_LEVEL_1 = 9,
APEX_INTERRUPT_TOP_LEVEL_2 = 10,
APEX_INTERRUPT_TOP_LEVEL_3 = 11,
APEX_INTERRUPT_FATAL_ERR = 12,
APEX_INTERRUPT_COUNT = 13,
};
/* Interrupt descriptors for Apex */
static struct gasket_interrupt_desc apex_interrupts[] = {
{
APEX_INTERRUPT_INSTR_QUEUE,
APEX_BAR2_REG_KERNEL_HIB_INSTR_QUEUE_INTVECCTL,
UNPACKED,
},
{
APEX_INTERRUPT_INPUT_ACTV_QUEUE,
APEX_BAR2_REG_KERNEL_HIB_INPUT_ACTV_QUEUE_INTVECCTL,
UNPACKED
},
{
APEX_INTERRUPT_PARAM_QUEUE,
APEX_BAR2_REG_KERNEL_HIB_PARAM_QUEUE_INTVECCTL,
UNPACKED
},
{
APEX_INTERRUPT_OUTPUT_ACTV_QUEUE,
APEX_BAR2_REG_KERNEL_HIB_OUTPUT_ACTV_QUEUE_INTVECCTL,
UNPACKED
},
{
APEX_INTERRUPT_SC_HOST_0,
APEX_BAR2_REG_KERNEL_HIB_SC_HOST_INTVECCTL,
PACK_0
},
{
APEX_INTERRUPT_SC_HOST_1,
APEX_BAR2_REG_KERNEL_HIB_SC_HOST_INTVECCTL,
PACK_1
},
{
APEX_INTERRUPT_SC_HOST_2,
APEX_BAR2_REG_KERNEL_HIB_SC_HOST_INTVECCTL,
PACK_2
},
{
APEX_INTERRUPT_SC_HOST_3,
APEX_BAR2_REG_KERNEL_HIB_SC_HOST_INTVECCTL,
PACK_3
},
{
APEX_INTERRUPT_TOP_LEVEL_0,
APEX_BAR2_REG_KERNEL_HIB_TOP_LEVEL_INTVECCTL,
PACK_0
},
{
APEX_INTERRUPT_TOP_LEVEL_1,
APEX_BAR2_REG_KERNEL_HIB_TOP_LEVEL_INTVECCTL,
PACK_1
},
{
APEX_INTERRUPT_TOP_LEVEL_2,
APEX_BAR2_REG_KERNEL_HIB_TOP_LEVEL_INTVECCTL,
PACK_2
},
{
APEX_INTERRUPT_TOP_LEVEL_3,
APEX_BAR2_REG_KERNEL_HIB_TOP_LEVEL_INTVECCTL,
PACK_3
},
{
APEX_INTERRUPT_FATAL_ERR,
APEX_BAR2_REG_KERNEL_HIB_FATAL_ERR_INTVECCTL,
UNPACKED
},
};
/* Allows device to enter power save upon driver close(). */
static int allow_power_save = 1;
/* Allows SW based clock gating. */
static int allow_sw_clock_gating;
/* Allows HW based clock gating. */
/* Note: this is not mutually exclusive with SW clock gating. */
static int allow_hw_clock_gating = 1;
/* Act as if only GCB is instantiated. */
static int bypass_top_level;
module_param(allow_power_save, int, 0644);
module_param(allow_sw_clock_gating, int, 0644);
module_param(allow_hw_clock_gating, int, 0644);
module_param(bypass_top_level, int, 0644);
/* Check the device status registers and return device status ALIVE or DEAD. */
static int apex_get_status(struct gasket_dev *gasket_dev)
{
/* TODO: Check device status. */
return GASKET_STATUS_ALIVE;
}
/* Enter GCB reset state. */
static int apex_enter_reset(struct gasket_dev *gasket_dev)
{
if (bypass_top_level)
return 0;
/*
* Software reset:
* Enable sleep mode
* - Software force GCB idle
* - Enable GCB idle
*/
gasket_read_modify_write_64(gasket_dev, APEX_BAR_INDEX,
APEX_BAR2_REG_IDLEGENERATOR_IDLEGEN_IDLEREGISTER,
0x0, 1, 32);
/* - Initiate DMA pause */
gasket_dev_write_64(gasket_dev, 1, APEX_BAR_INDEX,
APEX_BAR2_REG_USER_HIB_DMA_PAUSE);
/* - Wait for DMA pause complete. */
if (gasket_wait_with_reschedule(gasket_dev, APEX_BAR_INDEX,
APEX_BAR2_REG_USER_HIB_DMA_PAUSED, 1, 1,
APEX_RESET_DELAY, APEX_RESET_RETRY)) {
dev_err(gasket_dev->dev,
"DMAs did not quiesce within timeout (%d ms)\n",
APEX_RESET_RETRY * APEX_RESET_DELAY);
return -ETIMEDOUT;
}
/* - Enable GCB reset (0x1 to rg_rst_gcb) */
gasket_read_modify_write_32(gasket_dev, APEX_BAR_INDEX,
APEX_BAR2_REG_SCU_2, 0x1, 2, 2);
/* - Enable GCB clock Gate (0x1 to rg_gated_gcb) */
gasket_read_modify_write_32(gasket_dev, APEX_BAR_INDEX,
APEX_BAR2_REG_SCU_2, 0x1, 2, 18);
/* - Enable GCB memory shut down (0x3 to rg_force_ram_sd) */
gasket_read_modify_write_32(gasket_dev, APEX_BAR_INDEX,
APEX_BAR2_REG_SCU_3, 0x3, 2, 14);
/* - Wait for RAM shutdown. */
if (gasket_wait_with_reschedule(gasket_dev, APEX_BAR_INDEX,
APEX_BAR2_REG_SCU_3, BIT(6), BIT(6),
APEX_RESET_DELAY, APEX_RESET_RETRY)) {
dev_err(gasket_dev->dev,
"RAM did not shut down within timeout (%d ms)\n",
APEX_RESET_RETRY * APEX_RESET_DELAY);
return -ETIMEDOUT;
}
return 0;
}
/* Quit GCB reset state. */
static int apex_quit_reset(struct gasket_dev *gasket_dev)
{
u32 val0, val1;
if (bypass_top_level)
return 0;
/*
* Disable sleep mode:
* - Disable GCB memory shut down:
* - b00: Not forced (HW controlled)
* - b1x: Force disable
*/
gasket_read_modify_write_32(gasket_dev, APEX_BAR_INDEX,
APEX_BAR2_REG_SCU_3, 0x0, 2, 14);
/*
* - Disable software clock gate:
* - b00: Not forced (HW controlled)
* - b1x: Force disable
*/
gasket_read_modify_write_32(gasket_dev, APEX_BAR_INDEX,
APEX_BAR2_REG_SCU_2, 0x0, 2, 18);
/*
* - Disable GCB reset (rg_rst_gcb):
* - b00: Not forced (HW controlled)
* - b1x: Force disable = Force not Reset
*/
gasket_read_modify_write_32(gasket_dev, APEX_BAR_INDEX,
APEX_BAR2_REG_SCU_2, 0x2, 2, 2);
/* - Wait for RAM enable. */
if (gasket_wait_with_reschedule(gasket_dev, APEX_BAR_INDEX,
APEX_BAR2_REG_SCU_3, BIT(6), 0,
APEX_RESET_DELAY, APEX_RESET_RETRY)) {
dev_err(gasket_dev->dev,
"RAM did not enable within timeout (%d ms)\n",
APEX_RESET_RETRY * APEX_RESET_DELAY);
return -ETIMEDOUT;
}
/* - Wait for Reset complete. */
if (gasket_wait_with_reschedule(gasket_dev, APEX_BAR_INDEX,
APEX_BAR2_REG_SCU_3,
SCU3_CUR_RST_GCB_BIT_MASK, 0,
APEX_RESET_DELAY, APEX_RESET_RETRY)) {
dev_err(gasket_dev->dev,
"GCB did not leave reset within timeout (%d ms)\n",
APEX_RESET_RETRY * APEX_RESET_DELAY);
return -ETIMEDOUT;
}
if (!allow_hw_clock_gating) {
val0 = gasket_dev_read_32(gasket_dev, APEX_BAR_INDEX,
APEX_BAR2_REG_SCU_3);
/* Inactive and Sleep mode are disabled. */
gasket_read_modify_write_32(gasket_dev,
APEX_BAR_INDEX,
APEX_BAR2_REG_SCU_3, 0x3,
SCU3_RG_PWR_STATE_OVR_MASK_WIDTH,
SCU3_RG_PWR_STATE_OVR_BIT_OFFSET);
val1 = gasket_dev_read_32(gasket_dev, APEX_BAR_INDEX,
APEX_BAR2_REG_SCU_3);
dev_dbg(gasket_dev->dev,
"Disallow HW clock gating 0x%x -> 0x%x\n", val0, val1);
} else {
val0 = gasket_dev_read_32(gasket_dev, APEX_BAR_INDEX,
APEX_BAR2_REG_SCU_3);
/* Inactive mode enabled - Sleep mode disabled. */
gasket_read_modify_write_32(gasket_dev, APEX_BAR_INDEX,
APEX_BAR2_REG_SCU_3, 2,
SCU3_RG_PWR_STATE_OVR_MASK_WIDTH,
SCU3_RG_PWR_STATE_OVR_BIT_OFFSET);
val1 = gasket_dev_read_32(gasket_dev, APEX_BAR_INDEX,
APEX_BAR2_REG_SCU_3);
dev_dbg(gasket_dev->dev, "Allow HW clock gating 0x%x -> 0x%x\n",
val0, val1);
}
return 0;
}
/* Reset the Apex hardware. Called on final close via device_close_cb. */
static int apex_device_cleanup(struct gasket_dev *gasket_dev)
{
u64 scalar_error;
u64 hib_error;
int ret = 0;
hib_error = gasket_dev_read_64(gasket_dev, APEX_BAR_INDEX,
APEX_BAR2_REG_USER_HIB_ERROR_STATUS);
scalar_error = gasket_dev_read_64(gasket_dev, APEX_BAR_INDEX,
APEX_BAR2_REG_SCALAR_CORE_ERROR_STATUS);
dev_dbg(gasket_dev->dev,
"%s 0x%p hib_error 0x%llx scalar_error 0x%llx\n",
__func__, gasket_dev, hib_error, scalar_error);
if (allow_power_save)
ret = apex_enter_reset(gasket_dev);
return ret;
}
/* Determine if GCB is in reset state. */
static bool is_gcb_in_reset(struct gasket_dev *gasket_dev)
{
u32 val = gasket_dev_read_32(gasket_dev, APEX_BAR_INDEX,
APEX_BAR2_REG_SCU_3);
/* Masks rg_rst_gcb bit of SCU_CTRL_2 */
return (val & SCU3_CUR_RST_GCB_BIT_MASK);
}
/* Reset the hardware, then quit reset. Called on device open. */
static int apex_reset(struct gasket_dev *gasket_dev)
{
int ret;
if (bypass_top_level)
return 0;
if (!is_gcb_in_reset(gasket_dev)) {
/* We are not in reset - toggle the reset bit so as to force
* re-init of custom block
*/
dev_dbg(gasket_dev->dev, "%s: toggle reset\n", __func__);
ret = apex_enter_reset(gasket_dev);
if (ret)
return ret;
}
return apex_quit_reset(gasket_dev);
}
/*
* Check permissions for Apex ioctls.
* Returns true if the current user may execute this ioctl, and false otherwise.
*/
static bool apex_ioctl_check_permissions(struct file *filp, uint cmd)
{
return !!(filp->f_mode & FMODE_WRITE);
}
/* Gates or un-gates Apex clock. */
static long apex_clock_gating(struct gasket_dev *gasket_dev,
struct apex_gate_clock_ioctl __user *argp)
{
struct apex_gate_clock_ioctl ibuf;
if (bypass_top_level || !allow_sw_clock_gating)
return 0;
if (copy_from_user(&ibuf, argp, sizeof(ibuf)))
return -EFAULT;
dev_dbg(gasket_dev->dev, "%s %llu\n", __func__, ibuf.enable);
if (ibuf.enable) {
/* Quiesce AXI, gate GCB clock. */
gasket_read_modify_write_32(gasket_dev, APEX_BAR_INDEX,
APEX_BAR2_REG_AXI_QUIESCE, 0x1, 1,
16);
gasket_read_modify_write_32(gasket_dev, APEX_BAR_INDEX,
APEX_BAR2_REG_GCB_CLOCK_GATE, 0x1,
2, 18);
} else {
/* Un-gate GCB clock, un-quiesce AXI. */
gasket_read_modify_write_32(gasket_dev, APEX_BAR_INDEX,
APEX_BAR2_REG_GCB_CLOCK_GATE, 0x0,
2, 18);
gasket_read_modify_write_32(gasket_dev, APEX_BAR_INDEX,
APEX_BAR2_REG_AXI_QUIESCE, 0x0, 1,
16);
}
return 0;
}
/* Apex-specific ioctl handler. */
static long apex_ioctl(struct file *filp, uint cmd, void __user *argp)
{
struct gasket_dev *gasket_dev = filp->private_data;
if (!apex_ioctl_check_permissions(filp, cmd))
return -EPERM;
switch (cmd) {
case APEX_IOCTL_GATE_CLOCK:
return apex_clock_gating(gasket_dev, argp);
default:
return -ENOTTY; /* unknown command */
}
}
/* Display driver sysfs entries. */
static ssize_t sysfs_show(struct device *device, struct device_attribute *attr,
char *buf)
{
int ret;
struct gasket_dev *gasket_dev;
struct gasket_sysfs_attribute *gasket_attr;
enum sysfs_attribute_type type;
struct gasket_page_table *gpt;
uint val;
gasket_dev = gasket_sysfs_get_device_data(device);
if (!gasket_dev) {
dev_err(device, "No Apex device sysfs mapping found\n");
return -ENODEV;
}
gasket_attr = gasket_sysfs_get_attr(device, attr);
if (!gasket_attr) {
dev_err(device, "No Apex device sysfs attr data found\n");
gasket_sysfs_put_device_data(device, gasket_dev);
return -ENODEV;
}
type = (enum sysfs_attribute_type)gasket_attr->data.attr_type;
gpt = gasket_dev->page_table[0];
switch (type) {
case ATTR_KERNEL_HIB_PAGE_TABLE_SIZE:
val = gasket_page_table_num_entries(gpt);
break;
case ATTR_KERNEL_HIB_SIMPLE_PAGE_TABLE_SIZE:
val = gasket_page_table_num_simple_entries(gpt);
break;
case ATTR_KERNEL_HIB_NUM_ACTIVE_PAGES:
val = gasket_page_table_num_active_pages(gpt);
break;
default:
dev_dbg(gasket_dev->dev, "Unknown attribute: %s\n",
attr->attr.name);
ret = 0;
goto exit;
}
ret = scnprintf(buf, PAGE_SIZE, "%u\n", val);
exit:
gasket_sysfs_put_attr(device, gasket_attr);
gasket_sysfs_put_device_data(device, gasket_dev);
return ret;
}
static struct gasket_sysfs_attribute apex_sysfs_attrs[] = {
GASKET_SYSFS_RO(node_0_page_table_entries, sysfs_show,
ATTR_KERNEL_HIB_PAGE_TABLE_SIZE),
GASKET_SYSFS_RO(node_0_simple_page_table_entries, sysfs_show,
ATTR_KERNEL_HIB_SIMPLE_PAGE_TABLE_SIZE),
GASKET_SYSFS_RO(node_0_num_mapped_pages, sysfs_show,
ATTR_KERNEL_HIB_NUM_ACTIVE_PAGES),
GASKET_END_OF_ATTR_ARRAY
};
/* On device open, perform a core reinit reset. */
static int apex_device_open_cb(struct gasket_dev *gasket_dev)
{
return gasket_reset_nolock(gasket_dev);
}
static const struct pci_device_id apex_pci_ids[] = {
{ PCI_DEVICE(APEX_PCI_VENDOR_ID, APEX_PCI_DEVICE_ID) }, { 0 }
};
static int apex_pci_probe(struct pci_dev *pci_dev,
const struct pci_device_id *id)
{
int ret;
ulong page_table_ready, msix_table_ready;
int retries = 0;
struct gasket_dev *gasket_dev;
ret = pci_enable_device(pci_dev);
if (ret) {
dev_err(&pci_dev->dev, "error enabling PCI device\n");
return ret;
}
pci_set_master(pci_dev);
ret = gasket_pci_add_device(pci_dev, &gasket_dev);
if (ret) {
dev_err(&pci_dev->dev, "error adding gasket device\n");
pci_disable_device(pci_dev);
return ret;
}
pci_set_drvdata(pci_dev, gasket_dev);
apex_reset(gasket_dev);
while (retries < APEX_RESET_RETRY) {
page_table_ready =
gasket_dev_read_64(gasket_dev, APEX_BAR_INDEX,
APEX_BAR2_REG_KERNEL_HIB_PAGE_TABLE_INIT);
msix_table_ready =
gasket_dev_read_64(gasket_dev, APEX_BAR_INDEX,
APEX_BAR2_REG_KERNEL_HIB_MSIX_TABLE_INIT);
if (page_table_ready && msix_table_ready)
break;
schedule_timeout(msecs_to_jiffies(APEX_RESET_DELAY));
retries++;
}
if (retries == APEX_RESET_RETRY) {
if (!page_table_ready)
dev_err(gasket_dev->dev, "Page table init timed out\n");
if (!msix_table_ready)
dev_err(gasket_dev->dev, "MSI-X table init timed out\n");
ret = -ETIMEDOUT;
goto remove_device;
}
ret = gasket_sysfs_create_entries(gasket_dev->dev_info.device,
apex_sysfs_attrs);
if (ret)
dev_err(&pci_dev->dev, "error creating device sysfs entries\n");
ret = gasket_enable_device(gasket_dev);
if (ret) {
dev_err(&pci_dev->dev, "error enabling gasket device\n");
goto remove_device;
}
/* Place device in low power mode until opened */
if (allow_power_save)
apex_enter_reset(gasket_dev);
return 0;
remove_device:
gasket_pci_remove_device(pci_dev);
pci_disable_device(pci_dev);
return ret;
}
static void apex_pci_remove(struct pci_dev *pci_dev)
{
struct gasket_dev *gasket_dev = pci_get_drvdata(pci_dev);
gasket_disable_device(gasket_dev);
gasket_pci_remove_device(pci_dev);
pci_disable_device(pci_dev);
}
static const struct gasket_driver_desc apex_desc = {
.name = "apex",
.driver_version = APEX_DRIVER_VERSION,
.major = 120,
.minor = 0,
.module = THIS_MODULE,
.pci_id_table = apex_pci_ids,
.num_page_tables = NUM_NODES,
.page_table_bar_index = APEX_BAR_INDEX,
.page_table_configs = apex_page_table_configs,
.page_table_extended_bit = APEX_EXTENDED_SHIFT,
.bar_descriptions = {
GASKET_UNUSED_BAR,
GASKET_UNUSED_BAR,
{ APEX_BAR_BYTES, (VM_WRITE | VM_READ), APEX_BAR_OFFSET,
NUM_REGIONS, mappable_regions, PCI_BAR },
GASKET_UNUSED_BAR,
GASKET_UNUSED_BAR,
GASKET_UNUSED_BAR,
},
.coherent_buffer_description = {
APEX_CH_MEM_BYTES,
(VM_WRITE | VM_READ),
APEX_CM_OFFSET,
},
.interrupt_type = PCI_MSIX,
.interrupt_bar_index = APEX_BAR_INDEX,
.num_interrupts = APEX_INTERRUPT_COUNT,
.interrupts = apex_interrupts,
.interrupt_pack_width = 7,
.device_open_cb = apex_device_open_cb,
.device_close_cb = apex_device_cleanup,
.ioctl_handler_cb = apex_ioctl,
.device_status_cb = apex_get_status,
.hardware_revision_cb = NULL,
.device_reset_cb = apex_reset,
};
static struct pci_driver apex_pci_driver = {
.name = "apex",
.probe = apex_pci_probe,
.remove = apex_pci_remove,
.id_table = apex_pci_ids,
};
static int __init apex_init(void)
{
int ret;
ret = gasket_register_device(&apex_desc);
if (ret)
return ret;
ret = pci_register_driver(&apex_pci_driver);
if (ret)
gasket_unregister_device(&apex_desc);
return ret;
}
static void apex_exit(void)
{
pci_unregister_driver(&apex_pci_driver);
gasket_unregister_device(&apex_desc);
}
MODULE_DESCRIPTION("Google Apex driver");
MODULE_VERSION(APEX_DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("John Joseph <jnjoseph@google.com>");
MODULE_DEVICE_TABLE(pci, apex_pci_ids);
module_init(apex_init);
module_exit(apex_exit);

@@ -1,122 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Common Gasket device kernel and user space declarations.
*
* Copyright (C) 2018 Google, Inc.
*/
#ifndef __GASKET_H__
#define __GASKET_H__
#include <linux/ioctl.h>
#include <linux/types.h>
/* ioctl structure declarations */
/* Ioctl structures are padded to a multiple of 64 bits */
/* and padded to put 64 bit values on 64 bit boundaries. */
/* Unsigned 64 bit integers are used to hold pointers. */
/* This helps compatibility between 32 and 64 bits. */
/*
* Common structure for ioctls associating an eventfd with a device interrupt,
* when using the Gasket interrupt module.
*/
struct gasket_interrupt_eventfd {
u64 interrupt;
u64 event_fd;
};
/*
* Common structure for ioctls mapping and unmapping buffers when using the
* Gasket page_table module.
*/
struct gasket_page_table_ioctl {
u64 page_table_index;
u64 size;
u64 host_address;
u64 device_address;
};
/*
* Common structure for ioctls mapping and unmapping buffers when using the
* Gasket page_table module.
* dma_address: phys addr start of coherent memory, allocated by kernel
*/
struct gasket_coherent_alloc_config_ioctl {
u64 page_table_index;
u64 enable;
u64 size;
u64 dma_address;
};
/* Base number for all Gasket-common IOCTLs */
#define GASKET_IOCTL_BASE 0xDC
/* Reset the device. */
#define GASKET_IOCTL_RESET _IO(GASKET_IOCTL_BASE, 0)
/* Associate the specified [event]fd with the specified interrupt. */
#define GASKET_IOCTL_SET_EVENTFD \
_IOW(GASKET_IOCTL_BASE, 1, struct gasket_interrupt_eventfd)
/*
* Clears any eventfd associated with the specified interrupt. The (ulong)
* argument is the interrupt number to clear.
*/
#define GASKET_IOCTL_CLEAR_EVENTFD _IOW(GASKET_IOCTL_BASE, 2, unsigned long)
/*
* [Loopbacks only] Requests that the loopback device send the specified
* interrupt to the host. The (ulong) argument is the number of the interrupt to
* send.
*/
#define GASKET_IOCTL_LOOPBACK_INTERRUPT \
_IOW(GASKET_IOCTL_BASE, 3, unsigned long)
/* Queries the kernel for the number of page tables supported by the device. */
#define GASKET_IOCTL_NUMBER_PAGE_TABLES _IOR(GASKET_IOCTL_BASE, 4, u64)
/*
* Queries the kernel for the maximum size of the page table. Only the size and
* page_table_index fields are used from the struct gasket_page_table_ioctl.
*/
#define GASKET_IOCTL_PAGE_TABLE_SIZE \
_IOWR(GASKET_IOCTL_BASE, 5, struct gasket_page_table_ioctl)
/*
* Queries the kernel for the current simple page table size. Only the size and
* page_table_index fields are used from the struct gasket_page_table_ioctl.
*/
#define GASKET_IOCTL_SIMPLE_PAGE_TABLE_SIZE \
_IOWR(GASKET_IOCTL_BASE, 6, struct gasket_page_table_ioctl)
/*
* Tells the kernel to change the split between the number of simple and
* extended entries in the given page table. Only the size and page_table_index
* fields are used from the struct gasket_page_table_ioctl.
*/
#define GASKET_IOCTL_PARTITION_PAGE_TABLE \
_IOW(GASKET_IOCTL_BASE, 7, struct gasket_page_table_ioctl)
/*
* Tells the kernel to map size bytes at host_address to device_address in
* page_table_index page table.
*/
#define GASKET_IOCTL_MAP_BUFFER \
_IOW(GASKET_IOCTL_BASE, 8, struct gasket_page_table_ioctl)
/*
* Tells the kernel to unmap size bytes at host_address from device_address in
* page_table_index page table.
*/
#define GASKET_IOCTL_UNMAP_BUFFER \
_IOW(GASKET_IOCTL_BASE, 9, struct gasket_page_table_ioctl)
/* Clear the interrupt counts stored for this device. */
#define GASKET_IOCTL_CLEAR_INTERRUPT_COUNTS _IO(GASKET_IOCTL_BASE, 10)
/* Enable/Disable and configure the coherent allocator. */
#define GASKET_IOCTL_CONFIG_COHERENT_ALLOCATOR \
_IOWR(GASKET_IOCTL_BASE, 11, struct gasket_coherent_alloc_config_ioctl)
#endif /* __GASKET_H__ */
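For illustration only (not part of the removed code): a hypothetical user-space sketch of the buffer mapping flow these ioctls describe. The device node path, buffer size, and device-side address are assumptions for the example, and as with apex.h the kernel-only u64 type would need replacing in a real user-space build.

/* Hypothetical user-space sketch; not from the original commit. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "gasket.h"	/* assumes a user-space-safe copy of this header */

int main(void)
{
	struct gasket_page_table_ioctl map;
	void *buf = NULL;
	int fd = open("/dev/apex_0", O_RDWR);	/* assumed device node name */

	if (fd < 0 || posix_memalign(&buf, 4096, 4096))
		return 1;

	map.page_table_index = 0;	/* Apex exposes a single page table */
	map.size = 4096;
	map.host_address = (uint64_t)(uintptr_t)buf;
	map.device_address = 0;		/* assumed device-side address */

	if (ioctl(fd, GASKET_IOCTL_MAP_BUFFER, &map))
		perror("GASKET_IOCTL_MAP_BUFFER");

	/* ... device DMA would happen here ... */

	if (ioctl(fd, GASKET_IOCTL_UNMAP_BUFFER, &map))
		perror("GASKET_IOCTL_UNMAP_BUFFER");
	close(fd);
	free(buf);
	return 0;
}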

@@ -1,44 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2018 Google, Inc. */
#ifndef __GASKET_CONSTANTS_H__
#define __GASKET_CONSTANTS_H__
#define GASKET_FRAMEWORK_VERSION "1.1.2"
/*
* The maximum number of simultaneous device types supported by the framework.
*/
#define GASKET_FRAMEWORK_DESC_MAX 2
/* The maximum devices per each type. */
#define GASKET_DEV_MAX 256
/* The number of supported Gasket page tables per device. */
#define GASKET_MAX_NUM_PAGE_TABLES 1
/* Maximum length of device names (driver name + minor number suffix + NULL). */
#define GASKET_NAME_MAX 32
/* Device status enumeration. */
enum gasket_status {
/*
* A device is DEAD if it has not been initialized or has had an error.
*/
GASKET_STATUS_DEAD = 0,
/*
* A device is LAMED if the hardware is healthy but the kernel was
* unable to enable some functionality (e.g. interrupts).
*/
GASKET_STATUS_LAMED,
/* A device is ALIVE if it is ready for operation. */
GASKET_STATUS_ALIVE,
/*
* This status is set when the driver is exiting and waiting for all
* handles to be closed.
*/
GASKET_STATUS_DRIVER_EXIT,
};
#endif

File diff suppressed because it is too large

@@ -1,638 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Gasket generic driver. Defines the set of data types and functions necessary
* to define a driver using the Gasket generic driver framework.
*
* Copyright (C) 2018 Google, Inc.
*/
#ifndef __GASKET_CORE_H__
#define __GASKET_CORE_H__
#include <linux/cdev.h>
#include <linux/compiler.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include "gasket_constants.h"
/**
* struct gasket_num_name - Map numbers to names.
* @snn_num: Number.
* @snn_name: Name associated with the number, a char pointer.
*
* This structure maps numbers to names. It is used to provide printable enum
* names, e.g {0, "DEAD"} or {1, "ALIVE"}.
*/
struct gasket_num_name {
uint snn_num;
const char *snn_name;
};
/*
* Register location for packed interrupts.
* Each value indicates the location of an interrupt field (in units of
* gasket_driver_desc->interrupt_pack_width) within the containing register.
* In other words, this indicates the shift to use when creating a mask to
* extract/set bits within a register for a given interrupt.
*/
enum gasket_interrupt_packing {
PACK_0 = 0,
PACK_1 = 1,
PACK_2 = 2,
PACK_3 = 3,
UNPACKED = 4,
};
/* Type of the interrupt supported by the device. */
enum gasket_interrupt_type {
PCI_MSIX = 0,
};
/*
* Used to describe a Gasket interrupt. Contains an interrupt index, a register,
* and packing data for that interrupt. The register and packing data
* fields are relevant only for PCI_MSIX interrupt type and can be
* set to 0 for everything else.
*/
struct gasket_interrupt_desc {
/* Device-wide interrupt index/number. */
int index;
/* The register offset controlling this interrupt. */
u64 reg;
/* The location of this interrupt inside register reg, if packed. */
int packing;
};
/*
* This enum is used to identify memory regions being part of the physical
* memory that belongs to a device.
*/
enum mappable_area_type {
PCI_BAR = 0, /* Default */
BUS_REGION, /* For SYSBUS devices, i.e. AXI etc... */
COHERENT_MEMORY
};
/*
* Metadata for each BAR mapping.
* This struct is used so as to track PCI memory, I/O space, AXI and coherent
* memory area... i.e. memory objects which can be referenced in the device's
* mmap function.
*/
struct gasket_bar_data {
/* Virtual base address. */
u8 __iomem *virt_base;
/* Physical base address. */
ulong phys_base;
/* Length of the mapping. */
ulong length_bytes;
/* Type of mappable area */
enum mappable_area_type type;
};
/* Maintains device open ownership data. */
struct gasket_ownership {
/* 1 if the device is owned, 0 otherwise. */
int is_owned;
/* TGID of the owner. */
pid_t owner;
/* Count of current device opens in write mode. */
int write_open_count;
};
/* Page table modes of operation. */
enum gasket_page_table_mode {
/* The page table is partitionable as normal, all simple by default. */
GASKET_PAGE_TABLE_MODE_NORMAL,
/* All entries are always simple. */
GASKET_PAGE_TABLE_MODE_SIMPLE,
/* All entries are always extended. No extended bit is used. */
GASKET_PAGE_TABLE_MODE_EXTENDED,
};
/* Page table configuration. One per table. */
struct gasket_page_table_config {
/* The identifier/index of this page table. */
int id;
/* The operation mode of this page table. */
enum gasket_page_table_mode mode;
/* Total (first-level) entries in this page table. */
ulong total_entries;
/* Base register for the page table. */
int base_reg;
/*
* Register containing the extended page table. This value is unused in
* GASKET_PAGE_TABLE_MODE_SIMPLE and GASKET_PAGE_TABLE_MODE_EXTENDED
* modes.
*/
int extended_reg;
/* The bit index indicating whether a PT entry is extended. */
int extended_bit;
};
/* Maintains information about a device node. */
struct gasket_cdev_info {
/* The internal name of this device. */
char name[GASKET_NAME_MAX];
/* Device number. */
dev_t devt;
/* Kernel-internal device structure. */
struct device *device;
/* Character device for real. */
struct cdev cdev;
/* Flag indicating if cdev_add has been called for the devices. */
int cdev_added;
/* Pointer to the overall gasket_dev struct for this device. */
struct gasket_dev *gasket_dev_ptr;
/* Ownership data for the device in question. */
struct gasket_ownership ownership;
};
/* Describes the offset and length of mmapable device BAR regions. */
struct gasket_mappable_region {
u64 start;
u64 length_bytes;
};
/* Describe the offset, size, and permissions for a device bar. */
struct gasket_bar_desc {
/*
* The size of each PCI BAR range, in bytes. If a value is 0, that BAR
* will not be mapped into kernel space at all.
* For devices with 64 bit BARs, only elements 0, 2, and 4 should be
* populated, and 1, 3, and 5 should be set to 0.
* For example, for a device mapping 1M in each of the first two 64-bit
* BARs, this field would be set as { 0x100000, 0, 0x100000, 0, 0, 0 }
* (one number per bar_desc struct.)
*/
u64 size;
/* The permissions for this bar. (Should be VM_WRITE/VM_READ/VM_EXEC,
* and can be or'd.) If set to GASKET_NOMAP, the bar will
* not be used for mmapping.
*/
ulong permissions;
/* The memory address corresponding to the base of this bar, if used. */
u64 base;
/* The number of mappable regions in this bar. */
int num_mappable_regions;
/* The mappable subregions of this bar. */
const struct gasket_mappable_region *mappable_regions;
/* Type of mappable area */
enum mappable_area_type type;
};
/* Describes the offset, size, and permissions for a coherent buffer. */
struct gasket_coherent_buffer_desc {
/* The size of the coherent buffer. */
u64 size;
/* The permissions for this bar. (Should be VM_WRITE/VM_READ/VM_EXEC,
* and can be or'd.) If set to GASKET_NOMAP, the bar will
* not be used for mmapping.
*/
ulong permissions;
/* device side address. */
u64 base;
};
/* Coherent buffer structure. */
struct gasket_coherent_buffer {
/* Virtual base address. */
u8 *virt_base;
/* Physical base address. */
ulong phys_base;
/* Length of the mapping. */
ulong length_bytes;
};
/* Description of Gasket-specific permissions in the mmap field. */
enum gasket_mapping_options { GASKET_NOMAP = 0 };
/* This struct represents an undefined bar that should never be mapped. */
#define GASKET_UNUSED_BAR \
{ \
0, GASKET_NOMAP, 0, 0, NULL, 0 \
}
/* Internal data for a Gasket device. See gasket_core.c for more information. */
struct gasket_internal_desc;
#define MAX_NUM_COHERENT_PAGES 16
/*
* Device data for Gasket device instances.
*
* This structure contains the data required to manage a Gasket device.
*/
struct gasket_dev {
/* Pointer to the internal driver description for this device. */
struct gasket_internal_desc *internal_desc;
/* Device info */
struct device *dev;
/* PCI subsystem metadata. */
struct pci_dev *pci_dev;
/* This device's index into internal_desc->devs. */
int dev_idx;
/* The name of this device, as reported by the kernel. */
char kobj_name[GASKET_NAME_MAX];
/* Virtual address of mapped BAR memory range. */
struct gasket_bar_data bar_data[PCI_STD_NUM_BARS];
/* Coherent buffer. */
struct gasket_coherent_buffer coherent_buffer;
/* Number of page tables for this device. */
int num_page_tables;
/* Address translations. Page tables have a private implementation. */
struct gasket_page_table *page_table[GASKET_MAX_NUM_PAGE_TABLES];
/* Interrupt data for this device. */
struct gasket_interrupt_data *interrupt_data;
/* Status for this device - GASKET_STATUS_ALIVE or _DEAD. */
uint status;
/* Number of times this device has been reset. */
uint reset_count;
/* Dev information for the cdev node. */
struct gasket_cdev_info dev_info;
/* Hardware revision value for this device. */
int hardware_revision;
/* Protects access to per-device data (i.e. this structure). */
struct mutex mutex;
/* cdev hash tracking/membership structure, Accel and legacy. */
/* Unused until Accel is upstreamed. */
struct hlist_node hlist_node;
struct hlist_node legacy_hlist_node;
};
/* Type of the ioctl handler callback. */
typedef long (*gasket_ioctl_handler_cb_t)(struct file *file, uint cmd,
void __user *argp);
/* Type of the ioctl permissions check callback. See below. */
typedef int (*gasket_ioctl_permissions_cb_t)(struct file *filp, uint cmd,
void __user *argp);
/*
* Device type descriptor.
*
* This structure contains device-specific data needed to identify and address a
* type of device to be administered via the Gasket generic driver.
*
* Device IDs are per-driver. In other words, two drivers using the Gasket
* framework will each have a distinct device 0 (for example).
*/
struct gasket_driver_desc {
/* The name of this device type. */
const char *name;
/* The name of this specific device model. */
const char *chip_model;
/* The version of the chip specified in chip_model. */
const char *chip_version;
/* The version of this driver: "1.0.0", "2.1.3", etc. */
const char *driver_version;
/*
* Non-zero if we should create "legacy" (device and device-class-
* specific) character devices and sysfs nodes.
*/
/* Unused until Accel is upstreamed. */
int legacy_support;
/* Major and minor numbers identifying the device. */
int major, minor;
/* Module structure for this driver. */
struct module *module;
/* PCI ID table. */
const struct pci_device_id *pci_id_table;
/* The number of page tables handled by this driver. */
int num_page_tables;
/* The index of the bar containing the page tables. */
int page_table_bar_index;
/* Registers used to control each page table. */
const struct gasket_page_table_config *page_table_configs;
/* The bit index indicating whether a PT entry is extended. */
int page_table_extended_bit;
/*
* Legacy mmap address adjustment for legacy devices only. Should be 0
* for any new device.
*/
ulong legacy_mmap_address_offset;
/* Set of 6 bar descriptions that describe all PCIe bars.
* Note that BUS/AXI devices (i.e. non PCI devices) use those.
*/
struct gasket_bar_desc bar_descriptions[PCI_STD_NUM_BARS];
/*
* Coherent buffer description.
*/
struct gasket_coherent_buffer_desc coherent_buffer_description;
/* Interrupt type. (One of gasket_interrupt_type). */
int interrupt_type;
/* Index of the bar containing the interrupt registers to program. */
int interrupt_bar_index;
/* Number of interrupts in the gasket_interrupt_desc array */
int num_interrupts;
/* Description of the interrupts for this device. */
const struct gasket_interrupt_desc *interrupts;
/*
* If this device packs multiple interrupt->MSI-X mappings into a
* single register (i.e., "uses packed interrupts"), only a single bit
* width is supported for each interrupt mapping (unpacked/"full-width"
* interrupts are always supported). This value specifies that width. If
* packed interrupts are not used, this value is ignored.
*/
int interrupt_pack_width;
/* Driver callback functions - all may be NULL */
/*
* device_open_cb: Callback for when a device node is opened in write
* mode.
* @dev: The gasket_dev struct for this driver instance.
*
* This callback should perform device-specific setup that needs to
* occur only once when a device is first opened.
*/
int (*device_open_cb)(struct gasket_dev *dev);
/*
* device_release_cb: Callback when a device is closed.
* @gasket_dev: The gasket_dev struct for this driver instance.
*
* This callback is called whenever a device node fd is closed, as
* opposed to device_close_cb, which is called when the _last_
* descriptor for an open file is closed. This call is intended to
* handle any per-user or per-fd cleanup.
*/
int (*device_release_cb)(struct gasket_dev *gasket_dev,
struct file *file);
/*
* device_close_cb: Callback for when a device node is closed for the
* last time.
* @dev: The gasket_dev struct for this driver instance.
*
* This callback should perform device-specific cleanup that only
* needs to occur when the last reference to a device node is closed.
*
* This call is intended to handle any device-wide cleanup, as opposed
* to per-fd cleanup (which should be handled by device_release_cb).
*/
int (*device_close_cb)(struct gasket_dev *dev);
/*
* get_mappable_regions_cb: Get descriptors of mappable device memory.
* @gasket_dev: Pointer to the struct gasket_dev for this device.
* @bar_index: BAR for which to retrieve memory ranges.
* @mappable_regions: Out-pointer to the list of mappable regions on the
* device/BAR for this process.
* @num_mappable_regions: Out-pointer for the size of mappable_regions.
*
* Called when handling mmap(), this callback is used to determine which
* regions of device memory may be mapped by the current process. This
* information is then compared to mmap request to determine which
* regions to actually map.
*/
int (*get_mappable_regions_cb)(struct gasket_dev *gasket_dev,
int bar_index,
struct gasket_mappable_region **mappable_regions,
int *num_mappable_regions);
/*
* ioctl_permissions_cb: Check permissions for generic ioctls.
* @filp: File structure pointer describing this node usage session.
* @cmd: ioctl number to handle.
* @arg: ioctl-specific data pointer.
*
* Returns 1 if the ioctl may be executed, 0 otherwise. If this callback
* isn't specified a default routine will be used, that only allows the
* original device opener (i.e., the "owner") to execute state-affecting
* ioctls.
*/
gasket_ioctl_permissions_cb_t ioctl_permissions_cb;
/*
* ioctl_handler_cb: Callback to handle device-specific ioctls.
* @filp: File structure pointer describing this node usage session.
* @cmd: ioctl number to handle.
* @arg: ioctl-specific data pointer.
*
* Invoked whenever an ioctl is called that the generic Gasket
* framework doesn't support. If no cb is registered, unknown ioctls
* return -EINVAL. Should return an error status (either -EINVAL or
* the error result of the ioctl being handled).
*/
gasket_ioctl_handler_cb_t ioctl_handler_cb;
/*
* device_status_cb: Callback to determine device health.
* @dev: Pointer to the gasket_dev struct for this device.
*
* Called to determine if the device is healthy or not. Should return
* a member of the gasket_status_type enum.
*
*/
int (*device_status_cb)(struct gasket_dev *dev);
/*
* hardware_revision_cb: Get the device's hardware revision.
* @dev: Pointer to the gasket_dev struct for this device.
*
* Called to determine the reported rev of the physical hardware.
* Revision should be >0. A negative return value is an error.
*/
int (*hardware_revision_cb)(struct gasket_dev *dev);
/*
* device_reset_cb: Reset the hardware in question.
* @dev: Pointer to the gasket_dev structure for this device.
*
* Called by reset ioctls. This function should not
* lock the gasket_dev mutex. It should return 0 on success
* and an error on failure.
*/
int (*device_reset_cb)(struct gasket_dev *dev);
};
/*
* Register the specified device type with the framework.
* @desc: Populated/initialized device type descriptor.
*
* This function does _not_ take ownership of desc; the underlying struct must
* exist until the matching call to gasket_unregister_device.
* This function should be called from your driver's module_init function.
*/
int gasket_register_device(const struct gasket_driver_desc *desc);
/*
* Remove the specified device type from the framework.
* @desc: Descriptor for the device type to unregister; it should have been
* passed to gasket_register_device in a previous call.
*
* This function should be called from your driver's module_exit function.
*/
void gasket_unregister_device(const struct gasket_driver_desc *desc);
/* Add a PCI gasket device. */
int gasket_pci_add_device(struct pci_dev *pci_dev,
struct gasket_dev **gasket_devp);
/* Remove a PCI gasket device. */
void gasket_pci_remove_device(struct pci_dev *pci_dev);
/* Enable a Gasket device. */
int gasket_enable_device(struct gasket_dev *gasket_dev);
/* Disable a Gasket device. */
void gasket_disable_device(struct gasket_dev *gasket_dev);
/*
* Reset the Gasket device.
* @gasket_dev: Gasket device struct.
*
* Calls device_reset_cb. Returns 0 on success and an error code otherwise.
* gasket_reset_nolock will not lock the mutex, gasket_reset will.
*
*/
int gasket_reset(struct gasket_dev *gasket_dev);
int gasket_reset_nolock(struct gasket_dev *gasket_dev);
/*
* Memory management functions. These will likely be spun off into their own
* file in the future.
*/
/* Unmaps the specified mappable region from a VMA. */
int gasket_mm_unmap_region(const struct gasket_dev *gasket_dev,
struct vm_area_struct *vma,
const struct gasket_mappable_region *map_region);
/*
* Get the ioctl permissions callback.
* @gasket_dev: Gasket device structure.
*/
gasket_ioctl_permissions_cb_t
gasket_get_ioctl_permissions_cb(struct gasket_dev *gasket_dev);
/**
* Lookup a name by number in a num_name table.
* @num: Number to lookup.
* @table: Array of num_name structures, the table for the lookup.
*
*/
const char *gasket_num_name_lookup(uint num,
const struct gasket_num_name *table);
/* Handy inlines */
static inline ulong gasket_dev_read_64(struct gasket_dev *gasket_dev, int bar,
ulong location)
{
return readq_relaxed(&gasket_dev->bar_data[bar].virt_base[location]);
}
static inline void gasket_dev_write_64(struct gasket_dev *dev, u64 value,
int bar, ulong location)
{
writeq_relaxed(value, &dev->bar_data[bar].virt_base[location]);
}
static inline void gasket_dev_write_32(struct gasket_dev *dev, u32 value,
int bar, ulong location)
{
writel_relaxed(value, &dev->bar_data[bar].virt_base[location]);
}
static inline u32 gasket_dev_read_32(struct gasket_dev *dev, int bar,
ulong location)
{
return readl_relaxed(&dev->bar_data[bar].virt_base[location]);
}
static inline void gasket_read_modify_write_64(struct gasket_dev *dev, int bar,
ulong location, u64 value,
u64 mask_width, u64 mask_shift)
{
u64 mask, tmp;
tmp = gasket_dev_read_64(dev, bar, location);
mask = ((1ULL << mask_width) - 1) << mask_shift;
tmp = (tmp & ~mask) | (value << mask_shift);
gasket_dev_write_64(dev, tmp, bar, location);
}
static inline void gasket_read_modify_write_32(struct gasket_dev *dev, int bar,
ulong location, u32 value,
u32 mask_width, u32 mask_shift)
{
u32 mask, tmp;
tmp = gasket_dev_read_32(dev, bar, location);
mask = ((1 << mask_width) - 1) << mask_shift;
tmp = (tmp & ~mask) | (value << mask_shift);
gasket_dev_write_32(dev, tmp, bar, location);
}
/* Get the Gasket driver structure for a given device. */
const struct gasket_driver_desc *gasket_get_driver_desc(struct gasket_dev *dev);
/* Get the device structure for a given device. */
struct device *gasket_get_device(struct gasket_dev *dev);
/* Helper function, Asynchronous waits on a given set of bits. */
int gasket_wait_with_reschedule(struct gasket_dev *gasket_dev, int bar,
u64 offset, u64 mask, u64 val,
uint max_retries, u64 delay_ms);
#endif /* __GASKET_CORE_H__ */

@@ -1,515 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2018 Google, Inc. */
#include "gasket_interrupt.h"
#include "gasket_constants.h"
#include "gasket_core.h"
#include "gasket_sysfs.h"
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/printk.h>
#ifdef GASKET_KERNEL_TRACE_SUPPORT
#define CREATE_TRACE_POINTS
#include <trace/events/gasket_interrupt.h>
#else
#define trace_gasket_interrupt_event(x, ...)
#endif
/* Retry attempts if the requested number of interrupts aren't available. */
#define MSIX_RETRY_COUNT 3
/* Instance interrupt management data. */
struct gasket_interrupt_data {
/* The name associated with this interrupt data. */
const char *name;
/* Interrupt type. See gasket_interrupt_type in gasket_core.h */
int type;
/* The PCI device [if any] associated with the owning device. */
struct pci_dev *pci_dev;
/* Set to 1 if MSI-X has successfully been configured, 0 otherwise. */
int msix_configured;
/* The number of interrupts requested by the owning device. */
int num_interrupts;
/* A pointer to the interrupt descriptor struct for this device. */
const struct gasket_interrupt_desc *interrupts;
/* The index of the bar into which interrupts should be mapped. */
int interrupt_bar_index;
/* The width of a single interrupt in a packed interrupt register. */
int pack_width;
/*
* Design-wise, these elements should be bundled together, but
* pci_enable_msix's interface requires that they be managed
* individually (requires array of struct msix_entry).
*/
/* The number of successfully configured interrupts. */
int num_configured;
/* The MSI-X data for each requested/configured interrupt. */
struct msix_entry *msix_entries;
/* The eventfd "callback" data for each interrupt. */
struct eventfd_ctx **eventfd_ctxs;
/* The number of times each interrupt has been called. */
ulong *interrupt_counts;
/* Linux IRQ number. */
int irq;
};
/* Structures to display interrupt counts in sysfs. */
enum interrupt_sysfs_attribute_type {
ATTR_INTERRUPT_COUNTS,
};
/* Set up device registers for interrupt handling. */
static void gasket_interrupt_setup(struct gasket_dev *gasket_dev)
{
int i;
int pack_shift;
ulong mask;
ulong value;
struct gasket_interrupt_data *interrupt_data =
gasket_dev->interrupt_data;
if (!interrupt_data) {
dev_dbg(gasket_dev->dev, "Interrupt data is not initialized\n");
return;
}
dev_dbg(gasket_dev->dev, "Running interrupt setup\n");
/* Setup the MSIX table. */
for (i = 0; i < interrupt_data->num_interrupts; i++) {
/*
* If the interrupt is not packed, we can write the index into
* the register directly. Otherwise, we need to deal with a read-
* modify-write and shift based on the packing index.
*/
dev_dbg(gasket_dev->dev,
"Setting up interrupt index %d with index 0x%llx and packing %d\n",
interrupt_data->interrupts[i].index,
interrupt_data->interrupts[i].reg,
interrupt_data->interrupts[i].packing);
if (interrupt_data->interrupts[i].packing == UNPACKED) {
value = interrupt_data->interrupts[i].index;
} else {
switch (interrupt_data->interrupts[i].packing) {
case PACK_0:
pack_shift = 0;
break;
case PACK_1:
pack_shift = interrupt_data->pack_width;
break;
case PACK_2:
pack_shift = 2 * interrupt_data->pack_width;
break;
case PACK_3:
pack_shift = 3 * interrupt_data->pack_width;
break;
default:
dev_dbg(gasket_dev->dev,
"Found interrupt description with unknown enum %d\n",
interrupt_data->interrupts[i].packing);
return;
}
mask = ~(0xFFFF << pack_shift);
value = gasket_dev_read_64(gasket_dev,
interrupt_data->interrupt_bar_index,
interrupt_data->interrupts[i].reg);
value &= mask;
value |= interrupt_data->interrupts[i].index
<< pack_shift;
}
gasket_dev_write_64(gasket_dev, value,
interrupt_data->interrupt_bar_index,
interrupt_data->interrupts[i].reg);
}
}
static void
gasket_handle_interrupt(struct gasket_interrupt_data *interrupt_data,
int interrupt_index)
{
struct eventfd_ctx *ctx;
trace_gasket_interrupt_event(interrupt_data->name, interrupt_index);
ctx = interrupt_data->eventfd_ctxs[interrupt_index];
if (ctx)
eventfd_signal(ctx, 1);
++(interrupt_data->interrupt_counts[interrupt_index]);
}
static irqreturn_t gasket_msix_interrupt_handler(int irq, void *dev_id)
{
struct gasket_interrupt_data *interrupt_data = dev_id;
int interrupt = -1;
int i;
/* If this linear lookup is a problem, we can maintain a map/hash. */
for (i = 0; i < interrupt_data->num_interrupts; i++) {
if (interrupt_data->msix_entries[i].vector == irq) {
interrupt = interrupt_data->msix_entries[i].entry;
break;
}
}
if (interrupt == -1) {
pr_err("Received unknown irq %d\n", irq);
return IRQ_HANDLED;
}
gasket_handle_interrupt(interrupt_data, interrupt);
return IRQ_HANDLED;
}
static int
gasket_interrupt_msix_init(struct gasket_interrupt_data *interrupt_data)
{
int ret = 1;
int i;
interrupt_data->msix_entries =
kcalloc(interrupt_data->num_interrupts,
sizeof(*interrupt_data->msix_entries), GFP_KERNEL);
if (!interrupt_data->msix_entries)
return -ENOMEM;
for (i = 0; i < interrupt_data->num_interrupts; i++) {
interrupt_data->msix_entries[i].entry = i;
interrupt_data->msix_entries[i].vector = 0;
interrupt_data->eventfd_ctxs[i] = NULL;
}
/* Retry MSIX_RETRY_COUNT times if not enough IRQs are available. */
for (i = 0; i < MSIX_RETRY_COUNT && ret > 0; i++)
ret = pci_enable_msix_exact(interrupt_data->pci_dev,
interrupt_data->msix_entries,
interrupt_data->num_interrupts);
if (ret)
return ret > 0 ? -EBUSY : ret;
interrupt_data->msix_configured = 1;
for (i = 0; i < interrupt_data->num_interrupts; i++) {
ret = request_irq(interrupt_data->msix_entries[i].vector,
gasket_msix_interrupt_handler, 0,
interrupt_data->name, interrupt_data);
if (ret) {
dev_err(&interrupt_data->pci_dev->dev,
"Cannot get IRQ for interrupt %d, vector %d; "
"%d\n",
i, interrupt_data->msix_entries[i].vector, ret);
return ret;
}
interrupt_data->num_configured++;
}
return 0;
}
/*
* On QCM DragonBoard, we exit gasket_interrupt_msix_init() and kernel interrupt
* setup code with MSIX vectors masked. This is wrong because nothing else in
* the driver will normally touch the MSIX vectors.
*
* As a temporary hack, force unmasking there.
*
* TODO: Figure out why QCM kernel doesn't unmask the MSIX vectors, after
* gasket_interrupt_msix_init(), and remove this code.
*/
static void force_msix_interrupt_unmasking(struct gasket_dev *gasket_dev)
{
int i;
#define MSIX_VECTOR_SIZE 16
#define MSIX_MASK_BIT_OFFSET 12
#define APEX_BAR2_REG_KERNEL_HIB_MSIX_TABLE 0x46800
for (i = 0; i < gasket_dev->interrupt_data->num_configured; i++) {
/* Check if the MSIX vector is unmasked */
ulong location = APEX_BAR2_REG_KERNEL_HIB_MSIX_TABLE +
MSIX_MASK_BIT_OFFSET + i * MSIX_VECTOR_SIZE;
u32 mask =
gasket_dev_read_32(gasket_dev,
gasket_dev->interrupt_data->interrupt_bar_index,
location);
if (!(mask & 1))
continue;
/* Unmask the msix vector (clear 32 bits) */
gasket_dev_write_32(gasket_dev, 0,
gasket_dev->interrupt_data->interrupt_bar_index,
location);
}
#undef MSIX_VECTOR_SIZE
#undef MSIX_MASK_BIT_OFFSET
#undef APEX_BAR2_REG_KERNEL_HIB_MSIX_TABLE
}
static ssize_t interrupt_sysfs_show(struct device *device,
struct device_attribute *attr, char *buf)
{
int i, ret;
ssize_t written = 0, total_written = 0;
struct gasket_interrupt_data *interrupt_data;
struct gasket_dev *gasket_dev;
struct gasket_sysfs_attribute *gasket_attr;
enum interrupt_sysfs_attribute_type sysfs_type;
gasket_dev = gasket_sysfs_get_device_data(device);
if (!gasket_dev) {
dev_dbg(device, "No sysfs mapping found for device\n");
return 0;
}
gasket_attr = gasket_sysfs_get_attr(device, attr);
if (!gasket_attr) {
dev_dbg(device, "No sysfs attr data found for device\n");
gasket_sysfs_put_device_data(device, gasket_dev);
return 0;
}
sysfs_type = (enum interrupt_sysfs_attribute_type)
gasket_attr->data.attr_type;
interrupt_data = gasket_dev->interrupt_data;
switch (sysfs_type) {
case ATTR_INTERRUPT_COUNTS:
for (i = 0; i < interrupt_data->num_interrupts; ++i) {
written =
scnprintf(buf, PAGE_SIZE - total_written,
"0x%02x: %ld\n", i,
interrupt_data->interrupt_counts[i]);
total_written += written;
buf += written;
}
ret = total_written;
break;
default:
dev_dbg(gasket_dev->dev, "Unknown attribute: %s\n",
attr->attr.name);
ret = 0;
break;
}
gasket_sysfs_put_attr(device, gasket_attr);
gasket_sysfs_put_device_data(device, gasket_dev);
return ret;
}
static struct gasket_sysfs_attribute interrupt_sysfs_attrs[] = {
GASKET_SYSFS_RO(interrupt_counts, interrupt_sysfs_show,
ATTR_INTERRUPT_COUNTS),
GASKET_END_OF_ATTR_ARRAY,
};
int gasket_interrupt_init(struct gasket_dev *gasket_dev)
{
int ret;
struct gasket_interrupt_data *interrupt_data;
const struct gasket_driver_desc *driver_desc =
gasket_get_driver_desc(gasket_dev);
interrupt_data = kzalloc(sizeof(*interrupt_data), GFP_KERNEL);
if (!interrupt_data)
return -ENOMEM;
gasket_dev->interrupt_data = interrupt_data;
interrupt_data->name = driver_desc->name;
interrupt_data->type = driver_desc->interrupt_type;
interrupt_data->pci_dev = gasket_dev->pci_dev;
interrupt_data->num_interrupts = driver_desc->num_interrupts;
interrupt_data->interrupts = driver_desc->interrupts;
interrupt_data->interrupt_bar_index = driver_desc->interrupt_bar_index;
interrupt_data->pack_width = driver_desc->interrupt_pack_width;
interrupt_data->num_configured = 0;
interrupt_data->eventfd_ctxs =
kcalloc(driver_desc->num_interrupts,
sizeof(*interrupt_data->eventfd_ctxs), GFP_KERNEL);
if (!interrupt_data->eventfd_ctxs) {
kfree(interrupt_data);
return -ENOMEM;
}
interrupt_data->interrupt_counts =
kcalloc(driver_desc->num_interrupts,
sizeof(*interrupt_data->interrupt_counts), GFP_KERNEL);
if (!interrupt_data->interrupt_counts) {
kfree(interrupt_data->eventfd_ctxs);
kfree(interrupt_data);
return -ENOMEM;
}
switch (interrupt_data->type) {
case PCI_MSIX:
ret = gasket_interrupt_msix_init(interrupt_data);
if (ret)
break;
force_msix_interrupt_unmasking(gasket_dev);
break;
default:
ret = -EINVAL;
}
if (ret) {
/* Failing to set up interrupts will cause the device to report
 * GASKET_STATUS_LAMED, but it is not fatal.
 */
dev_warn(gasket_dev->dev,
"Couldn't initialize interrupts: %d\n", ret);
return 0;
}
gasket_interrupt_setup(gasket_dev);
gasket_sysfs_create_entries(gasket_dev->dev_info.device,
interrupt_sysfs_attrs);
return 0;
}
static void
gasket_interrupt_msix_cleanup(struct gasket_interrupt_data *interrupt_data)
{
int i;
for (i = 0; i < interrupt_data->num_configured; i++)
free_irq(interrupt_data->msix_entries[i].vector,
interrupt_data);
interrupt_data->num_configured = 0;
if (interrupt_data->msix_configured)
pci_disable_msix(interrupt_data->pci_dev);
interrupt_data->msix_configured = 0;
kfree(interrupt_data->msix_entries);
}
int gasket_interrupt_reinit(struct gasket_dev *gasket_dev)
{
int ret;
if (!gasket_dev->interrupt_data) {
dev_dbg(gasket_dev->dev,
"Attempted to reinit uninitialized interrupt data\n");
return -EINVAL;
}
switch (gasket_dev->interrupt_data->type) {
case PCI_MSIX:
gasket_interrupt_msix_cleanup(gasket_dev->interrupt_data);
ret = gasket_interrupt_msix_init(gasket_dev->interrupt_data);
if (ret)
break;
force_msix_interrupt_unmasking(gasket_dev);
break;
default:
ret = -EINVAL;
}
if (ret) {
/* Failing to set up interrupts will cause the device
 * to report GASKET_STATUS_LAMED, but it is not fatal.
 */
dev_warn(gasket_dev->dev, "Couldn't reinit interrupts: %d\n",
ret);
return 0;
}
gasket_interrupt_setup(gasket_dev);
return 0;
}
/* See gasket_interrupt.h for description. */
int gasket_interrupt_reset_counts(struct gasket_dev *gasket_dev)
{
dev_dbg(gasket_dev->dev, "Clearing interrupt counts\n");
memset(gasket_dev->interrupt_data->interrupt_counts, 0,
gasket_dev->interrupt_data->num_interrupts *
sizeof(*gasket_dev->interrupt_data->interrupt_counts));
return 0;
}
/* See gasket_interrupt.h for description. */
void gasket_interrupt_cleanup(struct gasket_dev *gasket_dev)
{
struct gasket_interrupt_data *interrupt_data =
gasket_dev->interrupt_data;
/*
* It is possible to get an error code from gasket_interrupt_init
* before interrupt_data has been allocated, so check it.
*/
if (!interrupt_data)
return;
switch (interrupt_data->type) {
case PCI_MSIX:
gasket_interrupt_msix_cleanup(interrupt_data);
break;
default:
break;
}
kfree(interrupt_data->interrupt_counts);
kfree(interrupt_data->eventfd_ctxs);
kfree(interrupt_data);
gasket_dev->interrupt_data = NULL;
}
int gasket_interrupt_system_status(struct gasket_dev *gasket_dev)
{
if (!gasket_dev->interrupt_data) {
dev_dbg(gasket_dev->dev, "Interrupt data is null\n");
return GASKET_STATUS_DEAD;
}
if (gasket_dev->interrupt_data->num_configured !=
gasket_dev->interrupt_data->num_interrupts) {
dev_dbg(gasket_dev->dev,
"Not all interrupts were configured\n");
return GASKET_STATUS_LAMED;
}
return GASKET_STATUS_ALIVE;
}
int gasket_interrupt_set_eventfd(struct gasket_interrupt_data *interrupt_data,
int interrupt, int event_fd)
{
struct eventfd_ctx *ctx;
if (interrupt < 0 || interrupt >= interrupt_data->num_interrupts)
return -EINVAL;
ctx = eventfd_ctx_fdget(event_fd);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
interrupt_data->eventfd_ctxs[interrupt] = ctx;
return 0;
}
int gasket_interrupt_clear_eventfd(struct gasket_interrupt_data *interrupt_data,
int interrupt)
{
if (interrupt < 0 || interrupt >= interrupt_data->num_interrupts)
return -EINVAL;
if (interrupt_data->eventfd_ctxs[interrupt]) {
eventfd_ctx_put(interrupt_data->eventfd_ctxs[interrupt]);
interrupt_data->eventfd_ctxs[interrupt] = NULL;
}
return 0;
}

View File

@@ -1,95 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Gasket common interrupt module. Defines functions for enabling
* eventfd-triggered interrupts between a Gasket device and a host process.
*
* Copyright (C) 2018 Google, Inc.
*/
#ifndef __GASKET_INTERRUPT_H__
#define __GASKET_INTERRUPT_H__
#include <linux/eventfd.h>
#include <linux/pci.h>
#include "gasket_core.h"
/* Note that this currently assumes that device interrupts are a dense set,
* numbered from 0 to (num_interrupts - 1). Should this ever change, these
* APIs will have to be updated.
*/
/* Opaque type used to hold interrupt subsystem data. */
struct gasket_interrupt_data;
/*
* Initialize the interrupt module.
* @gasket_dev: The Gasket device structure for the device to be initted.
*/
int gasket_interrupt_init(struct gasket_dev *gasket_dev);
/*
* Clean up a device's interrupt structure.
* @gasket_dev: The Gasket information structure for this device.
*
* Cleans up the device's interrupts and deallocates data.
*/
void gasket_interrupt_cleanup(struct gasket_dev *gasket_dev);
/*
* Clean up and re-initialize the MSI-x subsystem.
* @gasket_dev: The Gasket information structure for this device.
*
* Performs a teardown of the MSI-x subsystem and re-initializes it. Does not
* free the underlying data structures. Returns 0 on success and an error code
* on error.
*/
int gasket_interrupt_reinit(struct gasket_dev *gasket_dev);
/*
* Reset the counts stored in the interrupt subsystem.
* @gasket_dev: The Gasket information structure for this device.
*
* Sets the counts of all interrupts in the subsystem to 0.
*/
int gasket_interrupt_reset_counts(struct gasket_dev *gasket_dev);
/*
* Associates an eventfd with a device interrupt.
* @data: Pointer to device interrupt data.
* @interrupt: The device interrupt to configure.
* @event_fd: The eventfd to associate with the interrupt.
*
* Prepares the host to receive notification of device interrupts by
* associating event_fd with interrupt. After successful configuration, each
* receipt of the device interrupt signals event_fd.
*
* Returns 0 on success, a negative error code otherwise.
*/
int gasket_interrupt_set_eventfd(struct gasket_interrupt_data *interrupt_data,
int interrupt, int event_fd);
/*
* Removes an interrupt-eventfd association.
* @data: Pointer to device interrupt data.
* @interrupt: The device interrupt to de-associate.
*
* Removes any eventfd associated with the specified interrupt, if any.
*/
int gasket_interrupt_clear_eventfd(struct gasket_interrupt_data *interrupt_data,
int interrupt);
/*
* The below functions exist for backwards compatibility.
* No new uses should be written.
*/
/*
* Get the health of the interrupt subsystem.
* @gasket_dev: The Gasket device struct.
*
* Returns DEAD if not set up, LAMED if initialization failed, and ALIVE
* otherwise.
*/
int gasket_interrupt_system_status(struct gasket_dev *gasket_dev);
#endif
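/*
 * Illustrative user-space counterpart to the API above (not taken from the
 * driver sources): route one device interrupt to an eventfd and block until
 * the interrupt handler signals it. GASKET_IOCTL_SET_EVENTFD and the layout
 * of struct gasket_interrupt_eventfd are assumed to come from the driver's
 * UAPI header gasket.h, which is not reproduced in this diff; error cleanup
 * is kept minimal.
 */
#include <fcntl.h>
#include <stdint.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "gasket.h"	/* assumed UAPI ioctl numbers and structs */

static int wait_for_interrupt(const char *dev_node, int interrupt)
{
	uint64_t count;
	int efd = eventfd(0, EFD_CLOEXEC);
	int fd = open(dev_node, O_RDWR);
	struct gasket_interrupt_eventfd req = {
		.interrupt = interrupt,
		.event_fd = efd,
	};

	if (efd < 0 || fd < 0)
		return -1;
	/* Associate the eventfd with the device interrupt. */
	if (ioctl(fd, GASKET_IOCTL_SET_EVENTFD, &req) < 0)
		return -1;
	/* read() blocks until the interrupt handler signals the eventfd. */
	if (read(efd, &count, sizeof(count)) != sizeof(count))
		return -1;
	return 0;
}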

View File

@@ -1,388 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2018 Google, Inc. */
#include "gasket.h"
#include "gasket_ioctl.h"
#include "gasket_constants.h"
#include "gasket_core.h"
#include "gasket_interrupt.h"
#include "gasket_page_table.h"
#include <linux/compiler.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#ifdef GASKET_KERNEL_TRACE_SUPPORT
#define CREATE_TRACE_POINTS
#include <trace/events/gasket_ioctl.h>
#else
#define trace_gasket_ioctl_entry(x, ...)
#define trace_gasket_ioctl_exit(x)
#define trace_gasket_ioctl_integer_data(x)
#define trace_gasket_ioctl_eventfd_data(x, ...)
#define trace_gasket_ioctl_page_table_data(x, ...)
#define trace_gasket_ioctl_config_coherent_allocator(x, ...)
#endif
/* Associate an eventfd with an interrupt. */
static int gasket_set_event_fd(struct gasket_dev *gasket_dev,
struct gasket_interrupt_eventfd __user *argp)
{
struct gasket_interrupt_eventfd die;
if (copy_from_user(&die, argp, sizeof(struct gasket_interrupt_eventfd)))
return -EFAULT;
trace_gasket_ioctl_eventfd_data(die.interrupt, die.event_fd);
return gasket_interrupt_set_eventfd(gasket_dev->interrupt_data,
die.interrupt, die.event_fd);
}
/* Read the size of the page table. */
static int gasket_read_page_table_size(struct gasket_dev *gasket_dev,
struct gasket_page_table_ioctl __user *argp)
{
int ret = 0;
struct gasket_page_table_ioctl ibuf;
struct gasket_page_table *table;
if (copy_from_user(&ibuf, argp, sizeof(struct gasket_page_table_ioctl)))
return -EFAULT;
if (ibuf.page_table_index >= gasket_dev->num_page_tables)
return -EFAULT;
table = gasket_dev->page_table[ibuf.page_table_index];
ibuf.size = gasket_page_table_num_entries(table);
trace_gasket_ioctl_page_table_data(ibuf.page_table_index, ibuf.size,
ibuf.host_address,
ibuf.device_address);
if (copy_to_user(argp, &ibuf, sizeof(ibuf)))
return -EFAULT;
return ret;
}
/* Read the size of the simple page table. */
static int gasket_read_simple_page_table_size(struct gasket_dev *gasket_dev,
struct gasket_page_table_ioctl __user *argp)
{
int ret = 0;
struct gasket_page_table_ioctl ibuf;
struct gasket_page_table *table;
if (copy_from_user(&ibuf, argp, sizeof(struct gasket_page_table_ioctl)))
return -EFAULT;
if (ibuf.page_table_index >= gasket_dev->num_page_tables)
return -EFAULT;
table = gasket_dev->page_table[ibuf.page_table_index];
ibuf.size = gasket_page_table_num_simple_entries(table);
trace_gasket_ioctl_page_table_data(ibuf.page_table_index, ibuf.size,
ibuf.host_address,
ibuf.device_address);
if (copy_to_user(argp, &ibuf, sizeof(ibuf)))
return -EFAULT;
return ret;
}
/* Set the boundary between the simple and extended page tables. */
static int gasket_partition_page_table(struct gasket_dev *gasket_dev,
struct gasket_page_table_ioctl __user *argp)
{
int ret;
struct gasket_page_table_ioctl ibuf;
uint max_page_table_size;
struct gasket_page_table *table;
if (copy_from_user(&ibuf, argp, sizeof(struct gasket_page_table_ioctl)))
return -EFAULT;
trace_gasket_ioctl_page_table_data(ibuf.page_table_index, ibuf.size,
ibuf.host_address,
ibuf.device_address);
if (ibuf.page_table_index >= gasket_dev->num_page_tables)
return -EFAULT;
table = gasket_dev->page_table[ibuf.page_table_index];
max_page_table_size = gasket_page_table_max_size(table);
if (ibuf.size > max_page_table_size) {
dev_dbg(gasket_dev->dev,
"Partition request 0x%llx too large, max is 0x%x\n",
ibuf.size, max_page_table_size);
return -EINVAL;
}
mutex_lock(&gasket_dev->mutex);
ret = gasket_page_table_partition(table, ibuf.size);
mutex_unlock(&gasket_dev->mutex);
return ret;
}
/* Map a userspace buffer to a device virtual address. */
static int gasket_map_buffers(struct gasket_dev *gasket_dev,
struct gasket_page_table_ioctl __user *argp)
{
struct gasket_page_table_ioctl ibuf;
struct gasket_page_table *table;
if (copy_from_user(&ibuf, argp, sizeof(struct gasket_page_table_ioctl)))
return -EFAULT;
trace_gasket_ioctl_page_table_data(ibuf.page_table_index, ibuf.size,
ibuf.host_address,
ibuf.device_address);
if (ibuf.page_table_index >= gasket_dev->num_page_tables)
return -EFAULT;
table = gasket_dev->page_table[ibuf.page_table_index];
if (gasket_page_table_are_addrs_bad(table, ibuf.host_address,
ibuf.device_address, ibuf.size))
return -EINVAL;
return gasket_page_table_map(table, ibuf.host_address, ibuf.device_address,
ibuf.size / PAGE_SIZE);
}
/* Unmap a userspace buffer from a device virtual address. */
static int gasket_unmap_buffers(struct gasket_dev *gasket_dev,
struct gasket_page_table_ioctl __user *argp)
{
struct gasket_page_table_ioctl ibuf;
struct gasket_page_table *table;
if (copy_from_user(&ibuf, argp, sizeof(struct gasket_page_table_ioctl)))
return -EFAULT;
trace_gasket_ioctl_page_table_data(ibuf.page_table_index, ibuf.size,
ibuf.host_address,
ibuf.device_address);
if (ibuf.page_table_index >= gasket_dev->num_page_tables)
return -EFAULT;
table = gasket_dev->page_table[ibuf.page_table_index];
if (gasket_page_table_is_dev_addr_bad(table, ibuf.device_address, ibuf.size))
return -EINVAL;
gasket_page_table_unmap(table, ibuf.device_address, ibuf.size / PAGE_SIZE);
return 0;
}
/*
* Reserve structures for coherent allocation, and allocate or free the
* corresponding memory.
*/
static int gasket_config_coherent_allocator(struct gasket_dev *gasket_dev,
struct gasket_coherent_alloc_config_ioctl __user *argp)
{
int ret;
struct gasket_coherent_alloc_config_ioctl ibuf;
if (copy_from_user(&ibuf, argp,
sizeof(struct gasket_coherent_alloc_config_ioctl)))
return -EFAULT;
trace_gasket_ioctl_config_coherent_allocator(ibuf.enable, ibuf.size,
ibuf.dma_address);
if (ibuf.page_table_index >= gasket_dev->num_page_tables)
return -EFAULT;
if (ibuf.size > PAGE_SIZE * MAX_NUM_COHERENT_PAGES)
return -ENOMEM;
if (ibuf.enable == 0) {
ret = gasket_free_coherent_memory(gasket_dev, ibuf.size,
ibuf.dma_address,
ibuf.page_table_index);
} else {
ret = gasket_alloc_coherent_memory(gasket_dev, ibuf.size,
&ibuf.dma_address,
ibuf.page_table_index);
}
if (ret)
return ret;
if (copy_to_user(argp, &ibuf, sizeof(ibuf)))
return -EFAULT;
return 0;
}
/* Check permissions for Gasket ioctls. */
static bool gasket_ioctl_check_permissions(struct file *filp, uint cmd)
{
bool alive;
bool read, write;
struct gasket_dev *gasket_dev = (struct gasket_dev *)filp->private_data;
alive = (gasket_dev->status == GASKET_STATUS_ALIVE);
if (!alive)
dev_dbg(gasket_dev->dev, "%s alive %d status %d\n",
__func__, alive, gasket_dev->status);
read = !!(filp->f_mode & FMODE_READ);
write = !!(filp->f_mode & FMODE_WRITE);
switch (cmd) {
case GASKET_IOCTL_RESET:
case GASKET_IOCTL_CLEAR_INTERRUPT_COUNTS:
return write;
case GASKET_IOCTL_PAGE_TABLE_SIZE:
case GASKET_IOCTL_SIMPLE_PAGE_TABLE_SIZE:
case GASKET_IOCTL_NUMBER_PAGE_TABLES:
return read;
case GASKET_IOCTL_PARTITION_PAGE_TABLE:
case GASKET_IOCTL_CONFIG_COHERENT_ALLOCATOR:
return alive && write;
case GASKET_IOCTL_MAP_BUFFER:
case GASKET_IOCTL_UNMAP_BUFFER:
return alive && write;
case GASKET_IOCTL_CLEAR_EVENTFD:
case GASKET_IOCTL_SET_EVENTFD:
return alive && write;
}
return false; /* unknown permissions */
}
/*
* standard ioctl dispatch function.
* @filp: File structure pointer describing this node usage session.
* @cmd: ioctl number to handle.
* @argp: ioctl-specific data pointer.
*
* Standard ioctl dispatcher; forwards operations to individual handlers.
*/
long gasket_handle_ioctl(struct file *filp, uint cmd, void __user *argp)
{
struct gasket_dev *gasket_dev;
unsigned long arg = (unsigned long)argp;
gasket_ioctl_permissions_cb_t ioctl_permissions_cb;
int retval;
gasket_dev = (struct gasket_dev *)filp->private_data;
trace_gasket_ioctl_entry(gasket_dev->dev_info.name, cmd);
ioctl_permissions_cb = gasket_get_ioctl_permissions_cb(gasket_dev);
if (ioctl_permissions_cb) {
retval = ioctl_permissions_cb(filp, cmd, argp);
if (retval < 0) {
trace_gasket_ioctl_exit(retval);
return retval;
} else if (retval == 0) {
trace_gasket_ioctl_exit(-EPERM);
return -EPERM;
}
} else if (!gasket_ioctl_check_permissions(filp, cmd)) {
trace_gasket_ioctl_exit(-EPERM);
dev_dbg(gasket_dev->dev, "ioctl cmd=%x noperm\n", cmd);
return -EPERM;
}
/* Tracing happens in this switch statement for all ioctls with
* an integer argument; for ioctls with a struct argument that
* needs copying and decoding, the tracing is done within the
* handler call.
*/
switch (cmd) {
case GASKET_IOCTL_RESET:
retval = gasket_reset(gasket_dev);
break;
case GASKET_IOCTL_SET_EVENTFD:
retval = gasket_set_event_fd(gasket_dev, argp);
break;
case GASKET_IOCTL_CLEAR_EVENTFD:
trace_gasket_ioctl_integer_data(arg);
retval =
gasket_interrupt_clear_eventfd(gasket_dev->interrupt_data,
(int)arg);
break;
case GASKET_IOCTL_PARTITION_PAGE_TABLE:
trace_gasket_ioctl_integer_data(arg);
retval = gasket_partition_page_table(gasket_dev, argp);
break;
case GASKET_IOCTL_NUMBER_PAGE_TABLES:
trace_gasket_ioctl_integer_data(gasket_dev->num_page_tables);
if (copy_to_user(argp, &gasket_dev->num_page_tables,
sizeof(uint64_t)))
retval = -EFAULT;
else
retval = 0;
break;
case GASKET_IOCTL_PAGE_TABLE_SIZE:
retval = gasket_read_page_table_size(gasket_dev, argp);
break;
case GASKET_IOCTL_SIMPLE_PAGE_TABLE_SIZE:
retval = gasket_read_simple_page_table_size(gasket_dev, argp);
break;
case GASKET_IOCTL_MAP_BUFFER:
retval = gasket_map_buffers(gasket_dev, argp);
break;
case GASKET_IOCTL_CONFIG_COHERENT_ALLOCATOR:
retval = gasket_config_coherent_allocator(gasket_dev, argp);
break;
case GASKET_IOCTL_UNMAP_BUFFER:
retval = gasket_unmap_buffers(gasket_dev, argp);
break;
case GASKET_IOCTL_CLEAR_INTERRUPT_COUNTS:
/* Clear interrupt counts doesn't take an arg, so use 0. */
trace_gasket_ioctl_integer_data(0);
retval = gasket_interrupt_reset_counts(gasket_dev);
break;
default:
/* If we don't understand the ioctl, the best we can do is trace
* the arg.
*/
trace_gasket_ioctl_integer_data(arg);
dev_dbg(gasket_dev->dev,
"Unknown ioctl cmd=0x%x not caught by gasket_is_supported_ioctl\n",
cmd);
retval = -EINVAL;
break;
}
trace_gasket_ioctl_exit(retval);
return retval;
}
/*
* Determines if an ioctl is part of the standard Gasket framework.
* @cmd: The ioctl number to handle.
*
* Returns 1 if the ioctl is supported and 0 otherwise.
*/
long gasket_is_supported_ioctl(uint cmd)
{
switch (cmd) {
case GASKET_IOCTL_RESET:
case GASKET_IOCTL_SET_EVENTFD:
case GASKET_IOCTL_CLEAR_EVENTFD:
case GASKET_IOCTL_PARTITION_PAGE_TABLE:
case GASKET_IOCTL_NUMBER_PAGE_TABLES:
case GASKET_IOCTL_PAGE_TABLE_SIZE:
case GASKET_IOCTL_SIMPLE_PAGE_TABLE_SIZE:
case GASKET_IOCTL_MAP_BUFFER:
case GASKET_IOCTL_UNMAP_BUFFER:
case GASKET_IOCTL_CLEAR_INTERRUPT_COUNTS:
case GASKET_IOCTL_CONFIG_COHERENT_ALLOCATOR:
return 1;
default:
return 0;
}
}

View File

@@ -1,28 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2018 Google, Inc. */
#ifndef __GASKET_IOCTL_H__
#define __GASKET_IOCTL_H__
#include "gasket_core.h"
#include <linux/compiler.h>
/*
* Handle Gasket common ioctls.
* @filp: Pointer to the ioctl's file.
* @cmd: Ioctl command.
* @arg: Ioctl argument pointer.
*
* Returns 0 on success and nonzero on failure.
*/
long gasket_handle_ioctl(struct file *filp, uint cmd, void __user *argp);
/*
* Determines if an ioctl is part of the standard Gasket framework.
* @cmd: The ioctl number to handle.
*
* Returns 1 if the ioctl is supported and 0 otherwise.
*/
long gasket_is_supported_ioctl(uint cmd);
#endif
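/*
 * Sketch of how a device node's unlocked_ioctl is expected to route through
 * the two entry points above. The real wiring lives in gasket_core.c, which
 * is not reproduced here, so the handler name and the -ENOTTY fallback are
 * assumptions rather than a copy of that code.
 */
#include <linux/fs.h>
#include "gasket_ioctl.h"

static long example_unlocked_ioctl(struct file *filp, unsigned int cmd,
				   unsigned long arg)
{
	void __user *argp = (void __user *)arg;

	/* Framework-owned ioctls go to the common dispatcher. */
	if (gasket_is_supported_ioctl(cmd))
		return gasket_handle_ioctl(filp, cmd, argp);

	/* Anything else would fall through to a device-specific handler. */
	return -ENOTTY;
}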

File diff suppressed because it is too large

View File

@@ -1,249 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Gasket Page Table functionality. This file describes the address
* translation/paging functionality supported by the Gasket driver framework.
* As much as possible, internal details are hidden to simplify use -
* all calls are thread-safe (protected by an internal mutex) except where
* indicated otherwise.
*
* Copyright (C) 2018 Google, Inc.
*/
#ifndef __GASKET_PAGE_TABLE_H__
#define __GASKET_PAGE_TABLE_H__
#include <linux/pci.h>
#include <linux/types.h>
#include "gasket_constants.h"
#include "gasket_core.h"
/*
* Structure used for managing address translation on a device. All details are
* internal to the implementation.
*/
struct gasket_page_table;
/*
* Allocate and init address translation data.
* @ppage_table: Pointer to Gasket page table pointer. Set by this call.
* @att_base_reg: [Mapped] pointer to the first entry in the device's address
* translation table.
* @extended_offset_reg: [Mapped] pointer to the device's register containing
* the starting index of the extended translation table.
* @extended_bit_location: The index of the bit indicating whether an address
* is extended.
* @total_entries: The total number of entries in the device's address
* translation table.
* @device: Device structure for the underlying device. Only used for logging.
* @pci_dev: PCI system descriptor for the underlying device.
*
* Description: Allocates and initializes data to track address translation -
* simple and extended page table metadata. Initially, the page table is
* partitioned such that all addresses are "simple" (single-level lookup).
* gasket_partition_page_table can be called to change this partitioning.
*
* Returns 0 on success, a negative error code otherwise.
*/
int gasket_page_table_init(struct gasket_page_table **ppg_tbl,
const struct gasket_bar_data *bar_data,
const struct gasket_page_table_config *page_table_config,
struct device *device, struct pci_dev *pci_dev);
/*
* Deallocate and cleanup page table data.
* @page_table: Gasket page table pointer.
*
* Description: The inverse of gasket_init; frees page_table and its contained
* elements.
*
* Because this call destroys the page table, it cannot be
* thread-safe (mutex-protected)!
*/
void gasket_page_table_cleanup(struct gasket_page_table *page_table);
/*
* Sets the size of the simple page table.
* @page_table: Gasket page table pointer.
* @num_simple_entries: Desired size of the simple page table (in entries).
*
* Description: gasket_partition_page_table checks to see if the simple page
* size can be changed (i.e., if there are no active extended
* mappings in the new simple size range), and, if so,
* sets the new simple and extended page table sizes.
*
* Returns 0 if successful, or non-zero if the page table entries
* are not free.
*/
int gasket_page_table_partition(struct gasket_page_table *page_table,
uint num_simple_entries);
/*
* Get and map [host] user space pages into device memory.
* @page_table: Gasket page table pointer.
* @host_addr: Starting host virtual memory address of the pages.
* @dev_addr: Starting device address of the pages.
* @num_pages: Number of [4kB] pages to map.
*
* Description: Maps the "num_pages" pages of host memory pointed to by
* host_addr to the address "dev_addr" in device memory.
*
* The caller is responsible for checking the addresses ranges.
*
* Returns 0 if successful or a non-zero error number otherwise.
* If there is an error, no pages are mapped.
*/
int gasket_page_table_map(struct gasket_page_table *page_table, ulong host_addr,
ulong dev_addr, uint num_pages);
/*
* Un-map host pages from device memory.
* @page_table: Gasket page table pointer.
* @dev_addr: Starting device address of the pages to unmap.
* @num_pages: The number of [4kB] pages to unmap.
*
* Description: The inverse of gasket_map_pages. Unmaps pages from the device.
*/
void gasket_page_table_unmap(struct gasket_page_table *page_table,
ulong dev_addr, uint num_pages);
/*
* Unmap ALL host pages from device memory.
* @page_table: Gasket page table pointer.
*/
void gasket_page_table_unmap_all(struct gasket_page_table *page_table);
/*
* Unmap all host pages from device memory and reset the table to fully simple
* addressing.
* @page_table: Gasket page table pointer.
*/
void gasket_page_table_reset(struct gasket_page_table *page_table);
/*
* Reclaims unused page table memory.
* @page_table: Gasket page table pointer.
*
* Description: Examines the page table and frees any currently-unused
* allocations. Called internally on gasket_cleanup().
*/
void gasket_page_table_garbage_collect(struct gasket_page_table *page_table);
/*
* Retrieve the backing page for a device address.
* @page_table: Gasket page table pointer.
* @dev_addr: Gasket device address.
* @ppage: Pointer to a page pointer for the returned page.
* @poffset: Pointer to an unsigned long for the returned offset.
*
* Description: Interprets the address and looks up the corresponding page
* in the page table and the offset in that page. (We need an
* offset because the host page may be larger than the Gasket chip
* page it contains.)
*
* Returns 0 if successful, -1 for an error. The page pointer
* and offset are returned through the pointers, if successful.
*/
int gasket_page_table_lookup_page(struct gasket_page_table *page_table,
ulong dev_addr, struct page **page,
ulong *poffset);
/*
* Checks validity for input addrs and size.
* @page_table: Gasket page table pointer.
* @host_addr: Host address to check.
* @dev_addr: Gasket device address.
* @bytes: Size of the range to check (in bytes).
*
* Description: This call performs a number of checks to verify that the ranges
* specified by both addresses and the size are valid for mapping pages into
* device memory.
*
* Returns true if the mapping is bad, false otherwise.
*/
bool gasket_page_table_are_addrs_bad(struct gasket_page_table *page_table,
ulong host_addr, ulong dev_addr,
ulong bytes);
/*
* Checks validity for input dev addr and size.
* @page_table: Gasket page table pointer.
* @dev_addr: Gasket device address.
* @bytes: Size of the range to check (in bytes).
*
* Description: This call performs a number of checks to verify that the range
* specified by the device address and the size is valid for mapping pages into
* device memory.
*
* Returns true if the address is bad, false otherwise.
*/
bool gasket_page_table_is_dev_addr_bad(struct gasket_page_table *page_table,
ulong dev_addr, ulong bytes);
/*
* Gets maximum size for the given page table.
* @page_table: Gasket page table pointer.
*/
uint gasket_page_table_max_size(struct gasket_page_table *page_table);
/*
* Gets the total number of entries in the arg.
* @page_table: Gasket page table pointer.
*/
uint gasket_page_table_num_entries(struct gasket_page_table *page_table);
/*
* Gets the number of simple entries.
* @page_table: Gasket page table pointer.
*/
uint gasket_page_table_num_simple_entries(struct gasket_page_table *page_table);
/*
* Gets the number of actively pinned pages.
* @page_table: Gasket page table pointer.
*/
uint gasket_page_table_num_active_pages(struct gasket_page_table *page_table);
/*
* Get status of page table managed by @page_table.
* @page_table: Gasket page table pointer.
*/
int gasket_page_table_system_status(struct gasket_page_table *page_table);
/*
* Allocate a block of coherent memory.
* @gasket_dev: Gasket Device.
* @size: Size of the memory block.
* @dma_address: Dma address allocated by the kernel.
* @index: Index of the gasket_page_table within this Gasket device
*
* Description: Allocate a contiguous coherent memory block, DMA'ble
* by this device.
*/
int gasket_alloc_coherent_memory(struct gasket_dev *gasket_dev, uint64_t size,
dma_addr_t *dma_address, uint64_t index);
/* Release a block of contiguous coherent memory, in use by a device. */
int gasket_free_coherent_memory(struct gasket_dev *gasket_dev, uint64_t size,
dma_addr_t dma_address, uint64_t index);
/* Release all coherent memory. */
void gasket_free_coherent_memory_all(struct gasket_dev *gasket_dev,
uint64_t index);
/*
* Records the host_addr to coherent dma memory mapping.
* @gasket_dev: Gasket Device.
* @size: Size of the virtual address range to map.
* @dma_address: Dma address within the coherent memory range.
* @vma: Virtual address we wish to map to coherent memory.
*
* Description: For each page in the virtual address range, record the
* coherent page mapping.
*
* Does not perform validity checking.
*/
int gasket_set_user_virt(struct gasket_dev *gasket_dev, uint64_t size,
dma_addr_t dma_address, ulong vma);
#endif /* __GASKET_PAGE_TABLE_H__ */
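/*
 * Illustrative user-space use of the map path documented above via
 * GASKET_IOCTL_MAP_BUFFER. The ioctl number and the field layout of
 * struct gasket_page_table_ioctl are assumed to come from the UAPI header
 * gasket.h (not reproduced here); error cleanup is omitted for brevity.
 */
#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "gasket.h"	/* assumed UAPI ioctl numbers and structs */

static int map_one_buffer(int dev_fd, uint64_t dev_addr, size_t bytes)
{
	void *host_buf = NULL;
	struct gasket_page_table_ioctl ibuf = { 0 };

	/* Host range and size must be page aligned for the kernel's checks. */
	if (posix_memalign(&host_buf, sysconf(_SC_PAGESIZE), bytes))
		return -1;

	ibuf.page_table_index = 0;	/* first page table on the device */
	ibuf.size = bytes;		/* bytes; the driver converts to pages */
	ibuf.host_address = (uint64_t)(uintptr_t)host_buf;
	ibuf.device_address = dev_addr;

	return ioctl(dev_fd, GASKET_IOCTL_MAP_BUFFER, &ibuf);
}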

View File

@@ -1,398 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2018 Google, Inc. */
#include "gasket_sysfs.h"
#include "gasket_core.h"
#include <linux/device.h>
#include <linux/printk.h>
/*
* Pair of kernel device and user-specified pointer. Used in lookups in sysfs
* "show" functions to return user data.
*/
struct gasket_sysfs_mapping {
/*
* The device bound to this mapping. If this is NULL, then this mapping
* is free.
*/
struct device *device;
/* The Gasket descriptor for this device. */
struct gasket_dev *gasket_dev;
/* This device's set of sysfs attributes/nodes. */
struct gasket_sysfs_attribute *attributes;
/* The number of live elements in "attributes". */
int attribute_count;
/* Protects structure from simultaneous access. */
struct mutex mutex;
/* Tracks active users of this mapping. */
struct kref refcount;
};
/*
* Data needed to manage users of this sysfs utility.
* Currently has a fixed size; if space is a concern, this can be dynamically
* allocated.
*/
/*
* 'Global' (file-scoped) list of mappings between devices and gasket_data
* pointers. This removes the requirement to have a gasket_sysfs_data
* handle in all files.
*/
static struct gasket_sysfs_mapping dev_mappings[GASKET_SYSFS_NUM_MAPPINGS];
/* Callback when a mapping's refcount goes to zero. */
static void release_entry(struct kref *ref)
{
/* All work is done after the return from kref_put. */
}
/* Look up mapping information for the given device. */
static struct gasket_sysfs_mapping *get_mapping(struct device *device)
{
int i;
for (i = 0; i < GASKET_SYSFS_NUM_MAPPINGS; i++) {
mutex_lock(&dev_mappings[i].mutex);
if (dev_mappings[i].device == device) {
kref_get(&dev_mappings[i].refcount);
mutex_unlock(&dev_mappings[i].mutex);
return &dev_mappings[i];
}
mutex_unlock(&dev_mappings[i].mutex);
}
dev_dbg(device, "%s: Mapping to device %s not found\n",
__func__, device->kobj.name);
return NULL;
}
/* Put a reference to a mapping. */
static void put_mapping(struct gasket_sysfs_mapping *mapping)
{
int i;
int num_files_to_remove = 0;
struct device_attribute *files_to_remove;
struct device *device;
if (!mapping) {
pr_debug("%s: Mapping should not be NULL\n", __func__);
return;
}
mutex_lock(&mapping->mutex);
if (kref_put(&mapping->refcount, release_entry)) {
dev_dbg(mapping->device, "Removing Gasket sysfs mapping\n");
/*
* We can't remove the sysfs nodes in the kref callback, since
* device_remove_file() blocks until the node is free.
* Readers/writers of sysfs nodes, though, will be blocked on
* the mapping mutex, resulting in deadlock. To fix this, the
* sysfs nodes are removed outside the lock.
*/
device = mapping->device;
num_files_to_remove = mapping->attribute_count;
files_to_remove = kcalloc(num_files_to_remove,
sizeof(*files_to_remove),
GFP_KERNEL);
if (files_to_remove)
for (i = 0; i < num_files_to_remove; i++)
files_to_remove[i] =
mapping->attributes[i].attr;
else
num_files_to_remove = 0;
kfree(mapping->attributes);
mapping->attributes = NULL;
mapping->attribute_count = 0;
put_device(mapping->device);
mapping->device = NULL;
mapping->gasket_dev = NULL;
}
mutex_unlock(&mapping->mutex);
if (num_files_to_remove != 0) {
for (i = 0; i < num_files_to_remove; ++i)
device_remove_file(device, &files_to_remove[i]);
kfree(files_to_remove);
}
}
/*
* Put a reference to a mapping N times.
*
* In higher-level resource acquire/release function pairs, the release function
* must put the mapping twice: once for the reference taken by its own
* get_mapping() call, and once for the reference taken in the acquire call.
*/
static void put_mapping_n(struct gasket_sysfs_mapping *mapping, int times)
{
int i;
for (i = 0; i < times; i++)
put_mapping(mapping);
}
void gasket_sysfs_init(void)
{
int i;
for (i = 0; i < GASKET_SYSFS_NUM_MAPPINGS; i++) {
dev_mappings[i].device = NULL;
mutex_init(&dev_mappings[i].mutex);
}
}
int gasket_sysfs_create_mapping(struct device *device,
struct gasket_dev *gasket_dev)
{
struct gasket_sysfs_mapping *mapping;
int map_idx = -1;
/*
* We need a function-level mutex to protect against the same device
* being added [multiple times] simultaneously.
*/
static DEFINE_MUTEX(function_mutex);
mutex_lock(&function_mutex);
dev_dbg(device, "Creating sysfs entries for device\n");
/* Check that the device we're adding hasn't already been added. */
mapping = get_mapping(device);
if (mapping) {
dev_err(device,
"Attempting to re-initialize sysfs mapping for device\n");
put_mapping(mapping);
mutex_unlock(&function_mutex);
return -EBUSY;
}
/* Find the first empty entry in the array. */
for (map_idx = 0; map_idx < GASKET_SYSFS_NUM_MAPPINGS; ++map_idx) {
mutex_lock(&dev_mappings[map_idx].mutex);
if (!dev_mappings[map_idx].device)
/* Break with the mutex held! */
break;
mutex_unlock(&dev_mappings[map_idx].mutex);
}
if (map_idx == GASKET_SYSFS_NUM_MAPPINGS) {
dev_err(device, "All mappings have been exhausted\n");
mutex_unlock(&function_mutex);
return -ENOMEM;
}
dev_dbg(device, "Creating sysfs mapping for device %s\n",
device->kobj.name);
mapping = &dev_mappings[map_idx];
mapping->attributes = kcalloc(GASKET_SYSFS_MAX_NODES,
sizeof(*mapping->attributes),
GFP_KERNEL);
if (!mapping->attributes) {
dev_dbg(device, "Unable to allocate sysfs attribute array\n");
mutex_unlock(&mapping->mutex);
mutex_unlock(&function_mutex);
return -ENOMEM;
}
kref_init(&mapping->refcount);
mapping->device = get_device(device);
mapping->gasket_dev = gasket_dev;
mapping->attribute_count = 0;
mutex_unlock(&mapping->mutex);
mutex_unlock(&function_mutex);
/* Don't decrement the refcount here! One open count keeps it alive! */
return 0;
}
int gasket_sysfs_create_entries(struct device *device,
const struct gasket_sysfs_attribute *attrs)
{
int i;
int ret;
struct gasket_sysfs_mapping *mapping = get_mapping(device);
if (!mapping) {
dev_dbg(device,
"Creating entries for device without first initializing mapping\n");
return -EINVAL;
}
mutex_lock(&mapping->mutex);
for (i = 0; attrs[i].attr.attr.name; i++) {
if (mapping->attribute_count == GASKET_SYSFS_MAX_NODES) {
dev_err(device,
"Maximum number of sysfs nodes reached for device\n");
mutex_unlock(&mapping->mutex);
put_mapping(mapping);
return -ENOMEM;
}
ret = device_create_file(device, &attrs[i].attr);
if (ret) {
dev_dbg(device, "Unable to create device entries\n");
mutex_unlock(&mapping->mutex);
put_mapping(mapping);
return ret;
}
mapping->attributes[mapping->attribute_count] = attrs[i];
++mapping->attribute_count;
}
mutex_unlock(&mapping->mutex);
put_mapping(mapping);
return 0;
}
EXPORT_SYMBOL(gasket_sysfs_create_entries);
void gasket_sysfs_remove_mapping(struct device *device)
{
struct gasket_sysfs_mapping *mapping = get_mapping(device);
if (!mapping) {
dev_err(device,
"Attempted to remove non-existent sysfs mapping to device\n");
return;
}
put_mapping_n(mapping, 2);
}
struct gasket_dev *gasket_sysfs_get_device_data(struct device *device)
{
struct gasket_sysfs_mapping *mapping = get_mapping(device);
if (!mapping) {
dev_err(device, "device not registered\n");
return NULL;
}
return mapping->gasket_dev;
}
EXPORT_SYMBOL(gasket_sysfs_get_device_data);
void gasket_sysfs_put_device_data(struct device *device, struct gasket_dev *dev)
{
struct gasket_sysfs_mapping *mapping = get_mapping(device);
if (!mapping)
return;
/* See comment of put_mapping_n() for why the '2' is necessary. */
put_mapping_n(mapping, 2);
}
EXPORT_SYMBOL(gasket_sysfs_put_device_data);
struct gasket_sysfs_attribute *
gasket_sysfs_get_attr(struct device *device, struct device_attribute *attr)
{
int i;
int num_attrs;
struct gasket_sysfs_mapping *mapping = get_mapping(device);
struct gasket_sysfs_attribute *attrs = NULL;
if (!mapping)
return NULL;
attrs = mapping->attributes;
num_attrs = mapping->attribute_count;
for (i = 0; i < num_attrs; ++i) {
if (!strcmp(attrs[i].attr.attr.name, attr->attr.name))
return &attrs[i];
}
dev_err(device, "Unable to find match for device_attribute %s\n",
attr->attr.name);
return NULL;
}
EXPORT_SYMBOL(gasket_sysfs_get_attr);
void gasket_sysfs_put_attr(struct device *device,
struct gasket_sysfs_attribute *attr)
{
int i;
int num_attrs;
struct gasket_sysfs_mapping *mapping = get_mapping(device);
struct gasket_sysfs_attribute *attrs = NULL;
if (!mapping)
return;
attrs = mapping->attributes;
num_attrs = mapping->attribute_count;
for (i = 0; i < num_attrs; ++i) {
if (&attrs[i] == attr) {
put_mapping_n(mapping, 2);
return;
}
}
dev_err(device, "Unable to put unknown attribute: %s\n",
attr->attr.attr.name);
put_mapping(mapping);
}
EXPORT_SYMBOL(gasket_sysfs_put_attr);
ssize_t gasket_sysfs_register_store(struct device *device,
struct device_attribute *attr,
const char *buf, size_t count)
{
ulong parsed_value = 0;
struct gasket_sysfs_mapping *mapping;
struct gasket_dev *gasket_dev;
struct gasket_sysfs_attribute *gasket_attr;
if (count < 3 || buf[0] != '0' || buf[1] != 'x') {
dev_err(device,
"sysfs register write format: \"0x<hex value>\"\n");
return -EINVAL;
}
if (kstrtoul(buf, 16, &parsed_value) != 0) {
dev_err(device,
"Unable to parse input as 64-bit hex value: %s\n", buf);
return -EINVAL;
}
mapping = get_mapping(device);
if (!mapping) {
dev_err(device, "Device driver may have been removed\n");
return 0;
}
gasket_dev = mapping->gasket_dev;
if (!gasket_dev) {
dev_err(device, "Device driver may have been removed\n");
put_mapping(mapping);
return 0;
}
gasket_attr = gasket_sysfs_get_attr(device, attr);
if (!gasket_attr) {
put_mapping(mapping);
return count;
}
gasket_dev_write_64(gasket_dev, parsed_value,
gasket_attr->data.bar_address.bar,
gasket_attr->data.bar_address.offset);
if (gasket_attr->write_callback)
gasket_attr->write_callback(gasket_dev, gasket_attr,
parsed_value);
gasket_sysfs_put_attr(device, gasket_attr);
put_mapping(mapping);
return count;
}
EXPORT_SYMBOL(gasket_sysfs_register_store);

View File

@@ -1,175 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Set of common sysfs utilities.
*
* Copyright (C) 2018 Google, Inc.
*/
/* The functions described here are a set of utilities to allow each file in the
* Gasket driver framework to manage its own set of sysfs entries, instead of
* centralizing all that work in one file.
*
* The goal of these utilities is to allow for sysfs entries to be easily
* created without causing a proliferation of sysfs "show" functions. This
* requires O(N) string lookups during show function execution, but as reading
* sysfs entries is rarely performance-critical, this is likely acceptable.
*/
#ifndef __GASKET_SYSFS_H__
#define __GASKET_SYSFS_H__
#include "gasket_constants.h"
#include "gasket_core.h"
#include <linux/device.h>
#include <linux/stringify.h>
#include <linux/sysfs.h>
/* The maximum number of mappings/devices a driver needs to support. */
#define GASKET_SYSFS_NUM_MAPPINGS (GASKET_FRAMEWORK_DESC_MAX * GASKET_DEV_MAX)
/* The maximum number of sysfs nodes in a directory. */
#define GASKET_SYSFS_MAX_NODES 196
/*
* Terminator struct for a gasket_sysfs_attr array. Must be at the end of
* all gasket_sysfs_attribute arrays.
*/
#define GASKET_END_OF_ATTR_ARRAY \
{ \
.attr = __ATTR_NULL, \
.data.attr_type = 0, \
}
/*
* Pairing of sysfs attribute and user data.
* Used in lookups in sysfs "show" functions to return attribute metadata.
*/
struct gasket_sysfs_attribute {
/* The underlying sysfs device attribute associated with this data. */
struct device_attribute attr;
/* User-specified data to associate with the attribute. */
union {
struct bar_address_ {
ulong bar;
ulong offset;
} bar_address;
uint attr_type;
} data;
/*
* Function pointer to a callback to be invoked when this attribute is
* written (if so configured). The arguments are to the Gasket device
* pointer, the enclosing gasket_attr structure, and the value written.
* The callback should perform any logging necessary, as errors cannot
* be returned from the callback.
*/
void (*write_callback)(struct gasket_dev *dev,
struct gasket_sysfs_attribute *attr,
ulong value);
};
#define GASKET_SYSFS_RO(_name, _show_function, _attr_type) \
{ \
.attr = __ATTR(_name, 0444, _show_function, NULL), \
.data.attr_type = _attr_type \
}
/* Initializes the Gasket sysfs subsystem.
*
* Description: Performs one-time initialization. Must be called before usage
* at [Gasket] module load time.
*/
void gasket_sysfs_init(void);
/*
* Create an entry in mapping_data between a device and a Gasket device.
* @device: Device struct to map to.
* @gasket_dev: The dev struct associated with the driver controlling @device.
*
* Description: This function maps a gasket_dev* to a device*. This mapping can
* be used in sysfs_show functions to get a handle to the gasket_dev struct
* controlling the device node.
*
* If this function is not called before gasket_sysfs_create_entries, a warning
* will be logged.
*/
int gasket_sysfs_create_mapping(struct device *device,
struct gasket_dev *gasket_dev);
/*
* Creates bulk entries in sysfs.
* @device: Kernel device structure.
* @attrs: List of attributes/sysfs entries to create.
*
* Description: Creates each sysfs entry described in "attrs". Can be called
* multiple times for a given @device. If the gasket_dev specified in
* gasket_sysfs_create_mapping had a legacy device, the entries will be created
* for it, as well.
*/
int gasket_sysfs_create_entries(struct device *device,
const struct gasket_sysfs_attribute *attrs);
/*
* Removes a device mapping from the global table.
* @device: Device to unmap.
*
* Description: Removes the device->Gasket device mapping from the internal
* table.
*/
void gasket_sysfs_remove_mapping(struct device *device);
/*
* User data lookup based on kernel device structure.
* @device: Kernel device structure.
*
* Description: Returns the user data associated with "device" in a prior call
* to gasket_sysfs_create_entries. Returns NULL if no mapping can be found.
* Upon success, this call takes a reference to internal sysfs data that must be
* released with gasket_sysfs_put_device_data. While this reference is held, the
* underlying device sysfs information/structure will remain valid/will not be
* deleted.
*/
struct gasket_dev *gasket_sysfs_get_device_data(struct device *device);
/*
* Releases a reference to internal data.
* @device: Kernel device structure.
* @dev: Gasket device descriptor (returned by gasket_sysfs_get_device_data).
*/
void gasket_sysfs_put_device_data(struct device *device,
struct gasket_dev *gasket_dev);
/*
* Gasket-specific attribute lookup.
* @device: Kernel device structure.
* @attr: Device attribute to look up.
*
* Returns the Gasket sysfs attribute associated with the kernel device
* attribute and device structure itself. Upon success, this call will take a
* reference to internal sysfs data that must be released with a call to
* gasket_sysfs_put_attr. While this reference is held, the underlying device
* sysfs information/structure will remain valid/will not be deleted.
*/
struct gasket_sysfs_attribute *
gasket_sysfs_get_attr(struct device *device, struct device_attribute *attr);
/*
* Releases a reference to internal data.
* @device: Kernel device structure.
* @attr: Gasket sysfs attribute descriptor (returned by
* gasket_sysfs_get_attr).
*/
void gasket_sysfs_put_attr(struct device *device,
struct gasket_sysfs_attribute *attr);
/*
* Write to a register sysfs node.
* @buf: NULL-terminated data being written.
* @count: number of bytes in the "buf" argument.
*/
ssize_t gasket_sysfs_register_store(struct device *device,
struct device_attribute *attr,
const char *buf, size_t count);
#endif /* __GASKET_SYSFS_H__ */
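/*
 * Condensed sketch of the usage pattern these helpers support, modelled on
 * the interrupt_counts attribute in gasket_interrupt.c above. example_foo,
 * example_show() and ATTR_EXAMPLE_FOO are placeholders for illustration and
 * are not part of the framework.
 */
#include <linux/device.h>
#include <linux/kernel.h>
#include "gasket_sysfs.h"

enum example_sysfs_attribute_type { ATTR_EXAMPLE_FOO };

static ssize_t example_show(struct device *device,
			    struct device_attribute *attr, char *buf)
{
	ssize_t ret = 0;
	struct gasket_dev *gasket_dev = gasket_sysfs_get_device_data(device);
	struct gasket_sysfs_attribute *gasket_attr;

	if (!gasket_dev)
		return 0;
	gasket_attr = gasket_sysfs_get_attr(device, attr);
	if (gasket_attr) {
		/* A real handler switches on gasket_attr->data.attr_type. */
		ret = scnprintf(buf, PAGE_SIZE, "%d\n", ATTR_EXAMPLE_FOO);
		gasket_sysfs_put_attr(device, gasket_attr);
	}
	gasket_sysfs_put_device_data(device, gasket_dev);
	return ret;
}

static struct gasket_sysfs_attribute example_attrs[] = {
	GASKET_SYSFS_RO(example_foo, example_show, ATTR_EXAMPLE_FOO),
	GASKET_END_OF_ATTR_ARRAY,
};

/*
 * After gasket_sysfs_create_mapping() has bound the device, the array is
 * registered with:
 *
 *	gasket_sysfs_create_entries(gasket_dev->dev_info.device, example_attrs);
 */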