char/misc driver patches for 4.11-rc1
Here is the big char/misc driver patchset for 4.11-rc1. Lots of different
driver subsystems updated here: rework for the hyperv subsystem to handle
new platforms better, mei and w1 and extcon driver updates, as well as a
number of other "minor" driver updates. Full details are in the shortlog
below.

All of these have been in linux-next for a while with no reported issues.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

-----BEGIN PGP SIGNATURE-----

iG0EABECAC0WIQT0tgzFv3jCIUoxPcsxR9QN2y37KQUCWK2iRQ8cZ3JlZ0Brcm9h
aC5jb20ACgkQMUfUDdst+ynhFACguVE+/ixj5u5bT5DXQaZNai/6zIAAmgMWwd/t
YTD2cwsJsGbTT1fY3SUe
=CiSI
-----END PGP SIGNATURE-----

Merge tag 'char-misc-4.11-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc

Pull char/misc driver updates from Greg KH:
 "Here is the big char/misc driver patchset for 4.11-rc1. Lots of
  different driver subsystems updated here: rework for the hyperv
  subsystem to handle new platforms better, mei and w1 and extcon driver
  updates, as well as a number of other "minor" driver updates.

  All of these have been in linux-next for a while with no reported
  issues"

* tag 'char-misc-4.11-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc: (169 commits)
  goldfish: Sanitize the broken interrupt handler
  x86/platform/goldfish: Prevent unconditional loading
  vmbus: replace modulus operation with subtraction
  vmbus: constify parameters where possible
  vmbus: expose hv_begin/end_read
  vmbus: remove conditional locking of vmbus_write
  vmbus: add direct isr callback mode
  vmbus: change to per channel tasklet
  vmbus: put related per-cpu variable together
  vmbus: callback is in softirq not workqueue
  binder: Add support for file-descriptor arrays
  binder: Add support for scatter-gather
  binder: Add extra size to allocator
  binder: Refactor binder_transact()
  binder: Support multiple /dev instances
  binder: Deal with contexts in debugfs
  binder: Support multiple context managers
  binder: Split flat_binder_object
  auxdisplay: ht16k33: remove private workqueue
  auxdisplay: ht16k33: rework input device initialization
  ...
This commit is contained in commit e30aee9e10.
@@ -11,7 +11,7 @@ DOCBOOKS := z8530book.xml \
 	    writing_usb_driver.xml networking.xml \
 	    kernel-api.xml filesystems.xml lsm.xml kgdb.xml \
 	    gadget.xml libata.xml mtdnand.xml librs.xml rapidio.xml \
-	    genericirq.xml s390-drivers.xml uio-howto.xml scsi.xml \
+	    genericirq.xml s390-drivers.xml scsi.xml \
 	    sh.xml regulator.xml w1.xml \
 	    writing_musb_glue_layer.xml iio.xml
@@ -1195,6 +1195,10 @@
 			When zero, profiling data is discarded and associated
 			debugfs files are removed at module unload time.
 
+	goldfish	[X86] Enable the goldfish android emulator platform.
+			Don't use this when you are not running on the
+			android emulator
+
 	gpt		[EFI] Forces disk with valid GPT signature but
 			invalid Protective MBR to be treated as GPT. If the
 			primary GPT is corrupted, it enables the backup/alternate
Documentation/devicetree/bindings/misc/idt_89hpesx.txt (new file, 44 lines)

@@ -0,0 +1,44 @@
EEPROM / CSR SMBus-slave interface of IDT 89HPESx devices

Required properties:
  - compatible : should be "<manufacturer>,<type>"
		 Basically there is only one manufacturer: idt, but some
		 compatible devices may be produced in the future. The following
		 devices are supported: 89hpes8nt2, 89hpes12nt3, 89hpes24nt6ag2,
		 89hpes32nt8ag2, 89hpes32nt8bg2, 89hpes12nt12g2, 89hpes16nt16g2,
		 89hpes24nt24g2, 89hpes32nt24ag2, 89hpes32nt24bg2;
		 89hpes12n3, 89hpes12n3a, 89hpes24n3, 89hpes24n3a;
		 89hpes32h8, 89hpes32h8g2, 89hpes48h12, 89hpes48h12g2,
		 89hpes48h12ag2, 89hpes16h16, 89hpes22h16, 89hpes22h16g2,
		 89hpes34h16, 89hpes34h16g2, 89hpes64h16, 89hpes64h16g2,
		 89hpes64h16ag2;
		 89hpes12t3g2, 89hpes24t3g2, 89hpes16t4, 89hpes4t4g2,
		 89hpes10t4g2, 89hpes16t4g2, 89hpes16t4ag2, 89hpes5t5,
		 89hpes6t5, 89hpes8t5, 89hpes8t5a, 89hpes24t6, 89hpes6t6g2,
		 89hpes24t6g2, 89hpes16t7, 89hpes32t8, 89hpes32t8g2,
		 89hpes48t12, 89hpes48t12g2.
  - reg : I2C address of the IDT 89HPESx device.

Optionally there can be an EEPROM-compatible subnode:
  - compatible : Five EEPROM devices are supported: 24c32, 24c64, 24c128,
		 24c256 and 24c512, differing in size.
  - reg : (optional) Custom address of the EEPROM device. If not specified,
	  the IDT 89HPESx device will try to communicate with an EEPROM at
	  the default address 0x50.
  - read-only : (optional) Parameterless property that disables writes to
		the EEPROM.

Example:

	idt@60 {
		compatible = "idt,89hpes32nt8ag2";
		reg = <0x74>;
		#address-cells = <1>;
		#size-cells = <0>;

		eeprom@50 {
			compatible = "onsemi,24c64";
			reg = <0x50>;
			read-only;
		};
	};
@@ -1,13 +1,15 @@
 Freescale i.MX6 On-Chip OTP Controller (OCOTP) device tree bindings
 
 This binding represents the on-chip eFuse OTP controller found on
-i.MX6Q/D, i.MX6DL/S, i.MX6SL, and i.MX6SX SoCs.
+i.MX6Q/D, i.MX6DL/S, i.MX6SL, i.MX6SX and i.MX6UL SoCs.
 
 Required properties:
 - compatible: should be one of
 	"fsl,imx6q-ocotp" (i.MX6Q/D/DL/S),
-	"fsl,imx6sl-ocotp" (i.MX6SL), or
-	"fsl,imx6sx-ocotp" (i.MX6SX), followed by "syscon".
+	"fsl,imx6sl-ocotp" (i.MX6SL),
+	"fsl,imx6sx-ocotp" (i.MX6SX),
+	"fsl,imx6ul-ocotp" (i.MX6UL),
+	followed by "syscon".
 - reg: Should contain the register base and length.
 - clocks: Should contain a phandle pointing to the gated peripheral clock.
@@ -42,6 +42,12 @@ Optional properties in the area nodes:
           and in use by another device or devices
 - export : indicates that the reserved SRAM area may be accessed outside
            of the kernel, e.g. by bootloader or userspace
+- protect-exec : Same as 'pool' above but with the additional
+                 constraint that code will be run from the region and
+                 that the memory is maintained as read-only, executable
+                 during code execution. NOTE: This region must be page
+                 aligned on start and end in order to properly allow
+                 manipulation of the page attributes.
 - label : the name for the reserved partition, if omitted, the label
           is taken from the node name excluding the unit address.
@@ -30,6 +30,7 @@ available subsections can be seen below.
    miscellaneous
    vme
    80211/index
+   uio-howto
 
 .. only:: subproject and html
Documentation/driver-api/uio-howto.rst (new file, 705 lines)

@@ -0,0 +1,705 @@
=======================
The Userspace I/O HOWTO
=======================

:Author: Hans-Jürgen Koch, Linux developer, Linutronix
:Date:   2006-12-11

About this document
===================

Translations
------------

If you know of any translations for this document, or you are interested
in translating it, please email me hjk@hansjkoch.de.

Preface
-------

For many types of devices, creating a Linux kernel driver is overkill.
All that is really needed is some way to handle an interrupt and provide
access to the memory space of the device. The logic of controlling the
device does not necessarily have to be within the kernel, as the device
does not need to take advantage of any other resources that the kernel
provides. One common class of such devices is industrial I/O cards.

To address this situation, the userspace I/O system (UIO) was designed.
For typical industrial I/O cards, only a very small kernel module is
needed. The main part of the driver will run in user space. This
simplifies development and reduces the risk of serious bugs within a
kernel module.

Please note that UIO is not a universal driver interface. Devices that
are already handled well by other kernel subsystems (like networking or
serial or USB) are no candidates for a UIO driver. Hardware that is
ideally suited for a UIO driver fulfills all of the following:

- The device has memory that can be mapped. The device can be
  controlled completely by writing to this memory.

- The device usually generates interrupts.

- The device does not fit into one of the standard kernel subsystems.
Acknowledgments
---------------

I'd like to thank Thomas Gleixner and Benedikt Spranger of Linutronix,
who have not only written most of the UIO code, but also helped greatly
writing this HOWTO by giving me all kinds of background information.

Feedback
--------

Find something wrong with this document? (Or perhaps something right?) I
would love to hear from you. Please email me at hjk@hansjkoch.de.

About UIO
=========

If you use UIO for your card's driver, here's what you get:

- only one small kernel module to write and maintain.

- develop the main part of your driver in user space, with all the
  tools and libraries you're used to.

- bugs in your driver won't crash the kernel.

- updates of your driver can take place without recompiling the kernel.
How UIO works
-------------

Each UIO device is accessed through a device file and several sysfs
attribute files. The device file will be called ``/dev/uio0`` for the
first device, and ``/dev/uio1``, ``/dev/uio2`` and so on for subsequent
devices.

``/dev/uioX`` is used to access the address space of the card. Just use
:c:func:`mmap()` to access registers or RAM locations of your card.

Interrupts are handled by reading from ``/dev/uioX``. A blocking
:c:func:`read()` from ``/dev/uioX`` will return as soon as an
interrupt occurs. You can also use :c:func:`select()` on
``/dev/uioX`` to wait for an interrupt. The integer value read from
``/dev/uioX`` represents the total interrupt count. You can use this
number to figure out if you missed some interrupts.
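
For example, a minimal sketch of such a read loop might look like the
following; the device node ``/dev/uio0`` and the error handling are
assumptions made for the example::

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		uint32_t icount;
		int fd = open("/dev/uio0", O_RDONLY);	/* assumed device node */

		if (fd < 0)
			return 1;
		/* Each read() blocks until the next interrupt and returns
		   the total interrupt count as a 32-bit integer. */
		while (read(fd, &icount, sizeof(icount)) == sizeof(icount))
			printf("interrupts so far: %u\n", icount);
		close(fd);
		return 0;
	}
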
For some hardware that has more than one interrupt source internally,
but not separate IRQ mask and status registers, there might be
situations where userspace cannot determine what the interrupt source
was if the kernel handler disables them by writing to the chip's IRQ
register. In such a case, the kernel has to disable the IRQ completely
to leave the chip's register untouched. Now the userspace part can
determine the cause of the interrupt, but it cannot re-enable
interrupts. Another corner case is chips where re-enabling interrupts is
a read-modify-write operation to a combined IRQ status/acknowledge
register. This would be racy if a new interrupt occurred simultaneously.

To address these problems, UIO also implements a write() function. It is
normally not used and can be ignored for hardware that has only a single
interrupt source or has separate IRQ mask and status registers. If you
need it, however, a write to ``/dev/uioX`` will call the
:c:func:`irqcontrol()` function implemented by the driver. You have
to write a 32-bit value that is usually either 0 or 1 to disable or
enable interrupts. If a driver does not implement
:c:func:`irqcontrol()`, :c:func:`write()` will return with
``-ENOSYS``.
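
On the userspace side, enabling or disabling interrupts through
:c:func:`irqcontrol()` is then a plain 4-byte write; a hedged sketch,
assuming a driver that interprets 1 as "enable"::

	#include <stdint.h>
	#include <unistd.h>

	/* Returns 0 on success, -1 if the write failed (e.g. because the
	   driver has no irqcontrol() and write() gave -ENOSYS). */
	int uio_set_irq(int fd, uint32_t irq_on)
	{
		return write(fd, &irq_on, sizeof(irq_on)) == sizeof(irq_on)
			? 0 : -1;
	}
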
To handle interrupts properly, your custom kernel module can provide its
own interrupt handler. It will automatically be called by the built-in
handler.

For cards that don't generate interrupts but need to be polled, there is
the possibility to set up a timer that triggers the interrupt handler at
configurable time intervals. This interrupt simulation is done by
calling :c:func:`uio_event_notify()` from the timer's event
handler.
Each driver provides attributes that are used to read or write
variables. These attributes are accessible through sysfs files. A custom
kernel driver module can add its own attributes to the device owned by
the uio driver, but these are not added to the UIO device itself at this
time. This might change in the future if it is found to be useful.

The following standard attributes are provided by the UIO framework:

- ``name``: The name of your device. It is recommended to use the name
  of your kernel module for this.

- ``version``: A version string defined by your driver. This allows the
  user space part of your driver to deal with different versions of the
  kernel module.

- ``event``: The total number of interrupts handled by the driver since
  the last time the device node was read.

These attributes appear under the ``/sys/class/uio/uioX`` directory.
Please note that this directory might be a symlink, and not a real
directory. Any userspace code that accesses it must be able to handle
this.
Each UIO device can make one or more memory regions available for memory
mapping. This is necessary because some industrial I/O cards require
access to more than one PCI memory region in a driver.

Each mapping has its own directory in sysfs, the first mapping appears
as ``/sys/class/uio/uioX/maps/map0/``. Subsequent mappings create
directories ``map1/``, ``map2/``, and so on. These directories will only
appear if the size of the mapping is not 0.

Each ``mapX/`` directory contains four read-only files that show
attributes of the memory:

- ``name``: A string identifier for this mapping. This is optional, the
  string can be empty. Drivers can set this to make it easier for
  userspace to find the correct mapping.

- ``addr``: The address of memory that can be mapped.

- ``size``: The size, in bytes, of the memory pointed to by addr.

- ``offset``: The offset, in bytes, that has to be added to the pointer
  returned by :c:func:`mmap()` to get to the actual device memory.
  This is important if the device's memory is not page aligned.
  Remember that pointers returned by :c:func:`mmap()` are always
  page aligned, so it is good style to always add this offset.

From userspace, the different mappings are distinguished by adjusting
the ``offset`` parameter of the :c:func:`mmap()` call. To map the
memory of mapping N, you have to use N times the page size as your
offset::

        offset = N * getpagesize();

Sometimes there is hardware with memory-like regions that can not be
mapped with the technique described here, but there are still ways to
access them from userspace. The most common example is x86 ioports. On
x86 systems, userspace can access these ioports using
:c:func:`ioperm()`, :c:func:`iopl()`, :c:func:`inb()`,
:c:func:`outb()`, and similar functions.

Since these ioport regions can not be mapped, they will not appear under
``/sys/class/uio/uioX/maps/`` like the normal memory described above.
Without information about the port regions a hardware has to offer, it
becomes difficult for the userspace part of the driver to find out which
ports belong to which UIO device.

To address this situation, the new directory
``/sys/class/uio/uioX/portio/`` was added. It only exists if the driver
wants to pass information about one or more port regions to userspace.
If that is the case, subdirectories named ``port0``, ``port1``, and so
on, will appear underneath ``/sys/class/uio/uioX/portio/``.

Each ``portX/`` directory contains four read-only files that show name,
start, size, and type of the port region:

- ``name``: A string identifier for this port region. The string is
  optional and can be empty. Drivers can set it to make it easier for
  userspace to find a certain port region.

- ``start``: The first port of this region.

- ``size``: The number of ports in this region.

- ``porttype``: A string describing the type of port.
Writing your own kernel module
==============================

Please have a look at ``uio_cif.c`` as an example. The following
paragraphs explain the different sections of this file.

struct uio_info
---------------

This structure tells the framework the details of your driver. Some of
the members are required, others are optional.

- ``const char *name``: Required. The name of your driver as it will
  appear in sysfs. I recommend using the name of your module for this.

- ``const char *version``: Required. This string appears in
  ``/sys/class/uio/uioX/version``.

- ``struct uio_mem mem[ MAX_UIO_MAPS ]``: Required if you have memory
  that can be mapped with :c:func:`mmap()`. For each mapping you
  need to fill one of the ``uio_mem`` structures. See the description
  below for details.

- ``struct uio_port port[ MAX_UIO_PORTS_REGIONS ]``: Required if you
  want to pass information about ioports to userspace. For each port
  region you need to fill one of the ``uio_port`` structures. See the
  description below for details.

- ``long irq``: Required. If your hardware generates an interrupt, it is
  your module's task to determine the irq number during initialization.
  If you don't have a hardware generated interrupt but want to trigger
  the interrupt handler in some other way, set ``irq`` to
  ``UIO_IRQ_CUSTOM``. If you have no interrupt at all, you can set
  ``irq`` to ``UIO_IRQ_NONE``, though this rarely makes sense.

- ``unsigned long irq_flags``: Required if you've set ``irq`` to a
  hardware interrupt number. The flags given here will be used in the
  call to :c:func:`request_irq()`.

- ``int (*mmap)(struct uio_info *info, struct vm_area_struct *vma)``:
  Optional. If you need a special :c:func:`mmap()`
  function, you can set it here. If this pointer is not NULL, your
  :c:func:`mmap()` will be called instead of the built-in one.

- ``int (*open)(struct uio_info *info, struct inode *inode)``:
  Optional. You might want to have your own :c:func:`open()`,
  e.g. to enable interrupts only when your device is actually used.

- ``int (*release)(struct uio_info *info, struct inode *inode)``:
  Optional. If you define your own :c:func:`open()`, you will
  probably also want a custom :c:func:`release()` function.

- ``int (*irqcontrol)(struct uio_info *info, s32 irq_on)``:
  Optional. If you need to be able to enable or disable interrupts
  from userspace by writing to ``/dev/uioX``, you can implement this
  function. The parameter ``irq_on`` will be 0 to disable interrupts
  and 1 to enable them.
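
To make this concrete, here is a hedged sketch of a module that fills
``struct uio_info`` and registers one UIO device. The names, the address
and the IRQ number are invented for the example, the ``mem[]`` entries
are described below, and the usual driver registration boilerplate is
omitted::

	#include <linux/interrupt.h>
	#include <linux/module.h>
	#include <linux/platform_device.h>
	#include <linux/uio_driver.h>

	static irqreturn_t mycard_handler(int irq, struct uio_info *info)
	{
		/* A real driver would check and acknowledge its hardware
		   here; returning IRQ_HANDLED wakes up readers of /dev/uioX. */
		return IRQ_HANDLED;
	}

	static struct uio_info mycard_uio_info = {
		.name      = "mycard",		/* hypothetical name */
		.version   = "0.1",
		.irq       = 17,		/* invented IRQ number */
		.irq_flags = 0,
		.handler   = mycard_handler,
	};

	static int mycard_probe(struct platform_device *pdev)
	{
		mycard_uio_info.mem[0].name    = "registers";
		mycard_uio_info.mem[0].addr    = 0xd0000000;	/* invented */
		mycard_uio_info.mem[0].size    = 0x1000;
		mycard_uio_info.mem[0].memtype = UIO_MEM_PHYS;

		return uio_register_device(&pdev->dev, &mycard_uio_info);
	}
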
Usually, your device will have one or more memory regions that can be
mapped to user space. For each region, you have to set up a
``struct uio_mem`` in the ``mem[]`` array. Here's a description of the
fields of ``struct uio_mem``:

- ``const char *name``: Optional. Set this to help identify the memory
  region, it will show up in the corresponding sysfs node.

- ``int memtype``: Required if the mapping is used. Set this to
  ``UIO_MEM_PHYS`` if you have physical memory on your card to be
  mapped. Use ``UIO_MEM_LOGICAL`` for logical memory (e.g. allocated
  with :c:func:`kmalloc()`). There's also ``UIO_MEM_VIRTUAL`` for
  virtual memory.

- ``phys_addr_t addr``: Required if the mapping is used. Fill in the
  address of your memory block. This address is the one that appears in
  sysfs.

- ``resource_size_t size``: Fill in the size of the memory block that
  ``addr`` points to. If ``size`` is zero, the mapping is considered
  unused. Note that you *must* initialize ``size`` with zero for all
  unused mappings.

- ``void *internal_addr``: If you have to access this memory region
  from within your kernel module, you will want to map it internally by
  using something like :c:func:`ioremap()`. Addresses returned by
  this function cannot be mapped to user space, so you must not store
  it in ``addr``. Use ``internal_addr`` instead to remember such an
  address.

Please do not touch the ``map`` element of ``struct uio_mem``! It is
used by the UIO framework to set up sysfs files for this mapping. Simply
leave it alone.
Sometimes, your device can have one or more port regions which can not
be mapped to userspace. But if there are other possibilities for
userspace to access these ports, it makes sense to make information
about the ports available in sysfs. For each region, you have to set up
a ``struct uio_port`` in the ``port[]`` array. Here's a description of
the fields of ``struct uio_port``:

- ``char *porttype``: Required. Set this to one of the predefined
  constants. Use ``UIO_PORT_X86`` for the ioports found in x86
  architectures.

- ``unsigned long start``: Required if the port region is used. Fill in
  the number of the first port of this region.

- ``unsigned long size``: Fill in the number of ports in this region.
  If ``size`` is zero, the region is considered unused. Note that you
  *must* initialize ``size`` with zero for all unused regions.

Please do not touch the ``portio`` element of ``struct uio_port``! It is
used internally by the UIO framework to set up sysfs files for this
region. Simply leave it alone.
Adding an interrupt handler
---------------------------

What you need to do in your interrupt handler depends on your hardware
and on how you want to handle it. You should try to keep the amount of
code in your kernel interrupt handler low. If your hardware requires no
action that you *have* to perform after each interrupt, then your
handler can be empty.

If, on the other hand, your hardware *needs* some action to be performed
after each interrupt, then you *must* do it in your kernel module. Note
that you cannot rely on the userspace part of your driver. Your
userspace program can terminate at any time, possibly leaving your
hardware in a state where proper interrupt handling is still required.

There might also be applications where you want to read data from your
hardware at each interrupt and buffer it in a piece of kernel memory
you've allocated for that purpose. With this technique you could avoid
loss of data if your userspace program misses an interrupt.

A note on shared interrupts: Your driver should support interrupt
sharing whenever this is possible. It is possible if and only if your
driver can detect whether your hardware has triggered the interrupt or
not. This is usually done by looking at an interrupt status register. If
your driver sees that the IRQ bit is actually set, it will perform its
actions, and the handler returns IRQ_HANDLED. If the driver detects
that it was not your hardware that caused the interrupt, it will do
nothing and return IRQ_NONE, allowing the kernel to call the next
possible interrupt handler.

If you decide not to support shared interrupts, your card won't work in
computers with no free interrupts. As this frequently happens on the PC
platform, you can save yourself a lot of trouble by supporting interrupt
sharing.
Using uio_pdrv for platform devices
-----------------------------------

In many cases, UIO drivers for platform devices can be handled in a
generic way. In the same place where you define your
``struct platform_device``, you simply also implement your interrupt
handler and fill your ``struct uio_info``. A pointer to this
``struct uio_info`` is then used as ``platform_data`` for your platform
device.

You also need to set up an array of ``struct resource`` containing
addresses and sizes of your memory mappings. This information is passed
to the driver using the ``.resource`` and ``.num_resources`` elements of
``struct platform_device``.

You now have to set the ``.name`` element of ``struct platform_device``
to ``"uio_pdrv"`` to use the generic UIO platform device driver. This
driver will fill the ``mem[]`` array according to the resources given,
and register the device.

The advantage of this approach is that you only have to edit a file you
need to edit anyway. You do not have to create an extra driver.
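
A hedged sketch of such a board-file setup follows; the device name, the
address range and the ``uio_info`` contents are invented for the
example::

	#include <linux/ioport.h>
	#include <linux/kernel.h>
	#include <linux/platform_device.h>
	#include <linux/uio_driver.h>

	static struct resource mycard_resources[] = {
		{
			.start = 0xd0000000,	/* invented address */
			.end   = 0xd0000fff,
			.name  = "registers",
			.flags = IORESOURCE_MEM,
		},
	};

	static struct uio_info mycard_uio_info = {
		.name    = "mycard",
		.version = "0.1",
		/* .irq, .irq_flags and .handler as shown earlier */
	};

	static struct platform_device mycard_uio_device = {
		.name          = "uio_pdrv",	/* binds the generic driver */
		.id            = -1,
		.dev           = { .platform_data = &mycard_uio_info },
		.resource      = mycard_resources,
		.num_resources = ARRAY_SIZE(mycard_resources),
	};
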
Using uio_pdrv_genirq for platform devices
------------------------------------------

Especially in embedded devices, you frequently find chips where the irq
pin is tied to its own dedicated interrupt line. In such cases, where
you can be really sure the interrupt is not shared, we can take the
concept of ``uio_pdrv`` one step further and use a generic interrupt
handler. That's what ``uio_pdrv_genirq`` does.

The setup for this driver is the same as described above for
``uio_pdrv``, except that you do not implement an interrupt handler. The
``.handler`` element of ``struct uio_info`` must remain ``NULL``. The
``.irq_flags`` element must not contain ``IRQF_SHARED``.

You will set the ``.name`` element of ``struct platform_device`` to
``"uio_pdrv_genirq"`` to use this driver.

The generic interrupt handler of ``uio_pdrv_genirq`` will simply disable
the interrupt line using :c:func:`disable_irq_nosync()`. After
doing its work, userspace can re-enable the interrupt by writing
0x00000001 to the UIO device file. The driver already implements an
:c:func:`irqcontrol()` to make this possible, you must not
implement your own.

Using ``uio_pdrv_genirq`` not only saves a few lines of interrupt
handler code. You also do not need to know anything about the chip's
internal registers to create the kernel part of the driver. All you need
to know is the irq number of the pin the chip is connected to.
Using uio_dmem_genirq for platform devices
------------------------------------------

In addition to statically allocated memory ranges, there may also be a
desire to use dynamically allocated regions in a user space driver. In
particular, being able to access memory made available through the
dma-mapping API may be particularly useful. The ``uio_dmem_genirq``
driver provides a way to accomplish this.

This driver is used in a similar manner to the ``"uio_pdrv_genirq"``
driver with respect to interrupt configuration and handling.

Set the ``.name`` element of ``struct platform_device`` to
``"uio_dmem_genirq"`` to use this driver.

When using this driver, fill in the ``.platform_data`` element of
``struct platform_device``, which is of type
``struct uio_dmem_genirq_pdata`` and which contains the following
elements:

- ``struct uio_info uioinfo``: The same structure used as the
  ``uio_pdrv_genirq`` platform data

- ``unsigned int *dynamic_region_sizes``: Pointer to list of sizes of
  dynamic memory regions to be mapped into user space.

- ``unsigned int num_dynamic_regions``: Number of elements in
  ``dynamic_region_sizes`` array.

The dynamic regions defined in the platform data will be appended to the
``mem[]`` array after the platform device resources, which implies
that the total number of static and dynamic memory regions cannot exceed
``MAX_UIO_MAPS``.

The dynamic memory regions will be allocated when the UIO device file,
``/dev/uioX`` is opened. Similar to static memory resources, the memory
region information for dynamic regions is then visible via sysfs at
``/sys/class/uio/uioX/maps/mapY/*``. The dynamic memory regions will be
freed when the UIO device file is closed. When no processes are holding
the device file open, the address returned to userspace is ~0.
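
A hedged sketch of the platform data for this driver; the region sizes
and names are invented for the example::

	#include <linux/kernel.h>
	#include <linux/platform_data/uio_dmem_genirq.h>
	#include <linux/uio_driver.h>

	/* Two dynamically allocated regions of invented sizes. */
	static unsigned int mycard_dynamic_sizes[] = {
		64 * 1024,
		1024 * 1024,
	};

	static struct uio_dmem_genirq_pdata mycard_dmem_pdata = {
		.uioinfo = {
			.name    = "mycard",
			.version = "0.1",
			/* .irq and .irq_flags as for uio_pdrv_genirq */
		},
		.dynamic_region_sizes = mycard_dynamic_sizes,
		.num_dynamic_regions  = ARRAY_SIZE(mycard_dynamic_sizes),
	};
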
Writing a driver in userspace
=============================

Once you have a working kernel module for your hardware, you can write
the userspace part of your driver. You don't need any special libraries,
your driver can be written in any reasonable language, you can use
floating point numbers and so on. In short, you can use all the tools
and libraries you'd normally use for writing a userspace application.

Getting information about your UIO device
-----------------------------------------

Information about all UIO devices is available in sysfs. The first thing
you should do in your driver is check ``name`` and ``version`` to make
sure you're talking to the right device and that its kernel driver has
the version you expect.

You should also make sure that the memory mapping you need exists and
has the size you expect.

There is a tool called ``lsuio`` that lists UIO devices and their
attributes. It is available here:

	http://www.osadl.org/projects/downloads/UIO/user/

With ``lsuio`` you can quickly check if your kernel module is loaded and
which attributes it exports. Have a look at the manpage for details.

The source code of ``lsuio`` can serve as an example for getting
information about a UIO device. The file ``uio_helper.c`` contains a
lot of functions you could use in your userspace driver code.
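
As a small sketch of such a check, reading the ``name`` attribute of the
first device could look like this; the device number and the fixed
buffer size are assumptions of the example::

	#include <stdio.h>

	int main(void)
	{
		char name[64];
		FILE *f = fopen("/sys/class/uio/uio0/name", "r");

		if (!f)
			return 1;
		if (fgets(name, sizeof(name), f))
			printf("uio0 is: %s", name);	/* ends with a newline */
		fclose(f);
		return 0;
	}
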
mmap() device memory
--------------------

After you made sure you've got the right device with the memory mappings
you need, all you have to do is to call :c:func:`mmap()` to map the
device's memory to userspace.

The parameter ``offset`` of the :c:func:`mmap()` call has a special
meaning for UIO devices: It is used to select which mapping of your
device you want to map. To map the memory of mapping N, you have to use
N times the page size as your offset::

        offset = N * getpagesize();

N starts from zero, so if you've got only one memory range to map, set
``offset = 0``. A drawback of this technique is that memory is always
mapped beginning with its start address.
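
A hedged wrapper around this convention might look as follows; it
assumes the device file was opened with ``O_RDWR`` and that ``size``
was taken from the ``mapN/size`` sysfs attribute::

	#include <sys/mman.h>
	#include <unistd.h>

	/* Map mapping n of a UIO device, or return NULL on failure.
	   Remember to add the mapN/offset attribute to the returned
	   pointer if the device memory is not page aligned. */
	static void *uio_map(int fd, int n, size_t size)
	{
		void *p = mmap(NULL, size, PROT_READ | PROT_WRITE,
			       MAP_SHARED, fd, n * getpagesize());

		return p == MAP_FAILED ? NULL : p;
	}
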
Waiting for interrupts
----------------------

After you successfully mapped your device's memory, you can access it
like an ordinary array. Usually, you will perform some initialization.
After that, your hardware starts working and will generate an interrupt
as soon as it's finished, has some data available, or needs your
attention because an error occurred.

``/dev/uioX`` is a read-only file. A :c:func:`read()` will always
block until an interrupt occurs. There is only one legal value for the
``count`` parameter of :c:func:`read()`, and that is the size of a
signed 32 bit integer (4). Any other value for ``count`` causes
:c:func:`read()` to fail. The signed 32 bit integer read is the
interrupt count of your device. If the value is one more than the value
you read the last time, everything is OK. If the difference is greater
than one, you missed interrupts.

You can also use :c:func:`select()` on ``/dev/uioX``.
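
If you want to wait with a timeout instead of blocking forever, a hedged
:c:func:`select()` sketch (the five-second timeout is an arbitrary
choice for the example)::

	#include <sys/select.h>
	#include <sys/time.h>

	/* Returns 1 if an interrupt is pending on fd, 0 on timeout,
	   -1 on error. */
	int uio_wait(int fd)
	{
		fd_set fds;
		struct timeval tv = { .tv_sec = 5 };

		FD_ZERO(&fds);
		FD_SET(fd, &fds);
		return select(fd + 1, &fds, NULL, NULL, &tv);
	}
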
Generic PCI UIO driver
======================

The generic driver is a kernel module named uio_pci_generic. It can
work with any device compliant to PCI 2.3 (circa 2002) and any compliant
PCI Express device. Using this, you only need to write the userspace
driver, removing the need to write a hardware-specific kernel module.

Making the driver recognize the device
--------------------------------------

Since the driver does not declare any device ids, it will not get loaded
automatically and will not automatically bind to any devices. You must
load it and allocate an id to the driver yourself. For example::

	modprobe uio_pci_generic
	echo "8086 10f5" > /sys/bus/pci/drivers/uio_pci_generic/new_id

If there already is a hardware-specific kernel driver for your device,
the generic driver still won't bind to it. In this case, if you want to
use the generic driver (why would you?), you'll have to manually unbind
the hardware-specific driver and bind the generic driver, like this::

	echo -n 0000:00:19.0 > /sys/bus/pci/drivers/e1000e/unbind
	echo -n 0000:00:19.0 > /sys/bus/pci/drivers/uio_pci_generic/bind

You can verify that the device has been bound to the driver by looking
for it in sysfs, for example like the following::

	ls -l /sys/bus/pci/devices/0000:00:19.0/driver

Which if successful should print::

	.../0000:00:19.0/driver -> ../../../bus/pci/drivers/uio_pci_generic

Note that the generic driver will not bind to old PCI 2.2 devices. If
binding the device failed, run the following command::

	dmesg

and look in the output for failure reasons.
Things to know about uio_pci_generic
------------------------------------

Interrupts are handled using the Interrupt Disable bit in the PCI
command register and Interrupt Status bit in the PCI status register.
All devices compliant to PCI 2.3 (circa 2002) and all compliant PCI
Express devices should support these bits. uio_pci_generic detects
this support, and won't bind to devices which do not support the
Interrupt Disable Bit in the command register.

On each interrupt, uio_pci_generic sets the Interrupt Disable bit.
This prevents the device from generating further interrupts until the
bit is cleared. The userspace driver should clear this bit before
blocking and waiting for more interrupts.
Writing userspace driver using uio_pci_generic
----------------------------------------------

A userspace driver can use the PCI sysfs interface, or the libpci
library that wraps it, to talk to the device and to re-enable interrupts
by writing to the command register.

Example code using uio_pci_generic
----------------------------------

Here is some sample userspace driver code using uio_pci_generic::
	#include <stdlib.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/types.h>
	#include <sys/stat.h>
	#include <fcntl.h>
	#include <errno.h>

	int main()
	{
		int uiofd;
		int configfd;
		int err;
		int i;
		unsigned icount;
		unsigned char command_high;

		uiofd = open("/dev/uio0", O_RDONLY);
		if (uiofd < 0) {
			perror("uio open:");
			return errno;
		}
		configfd = open("/sys/class/uio/uio0/device/config", O_RDWR);
		if (configfd < 0) {
			perror("config open:");
			return errno;
		}

		/* Read and cache the upper byte of the PCI command register;
		   bit 2 of this byte is the Interrupt Disable bit (bit 10
		   of the full 16-bit register). */
		err = pread(configfd, &command_high, 1, 5);
		if (err != 1) {
			perror("command config read:");
			return errno;
		}
		command_high &= ~0x4;

		for (i = 0;; ++i) {
			/* Print out a message, for debugging. */
			if (i == 0)
				fprintf(stderr, "Started uio test driver.\n");
			else
				fprintf(stderr, "Interrupts: %d\n", icount);

			/****************************************/
			/* Here we got an interrupt from the
			   device. Do something to it. */
			/****************************************/

			/* Re-enable interrupts by clearing the Interrupt
			   Disable bit. */
			err = pwrite(configfd, &command_high, 1, 5);
			if (err != 1) {
				perror("config write:");
				break;
			}

			/* Wait for next interrupt. */
			err = read(uiofd, &icount, 4);
			if (err != 4) {
				perror("uio read:");
				break;
			}
		}
		return errno;
	}
Generic Hyper-V UIO driver
==========================

The generic driver is a kernel module named uio_hv_generic. It
supports devices on the Hyper-V VMBus similar to uio_pci_generic on
the PCI bus.

Making the driver recognize the device
--------------------------------------

Since the driver does not declare any device GUIDs, it will not get
loaded automatically and will not automatically bind to any devices. You
must load it and allocate an id to the driver yourself. For example, to
use the network device GUID::

	modprobe uio_hv_generic
	echo "f8615163-df3e-46c5-913f-f2d2f965ed0e" > /sys/bus/vmbus/drivers/uio_hv_generic/new_id

If there already is a hardware-specific kernel driver for the device,
the generic driver still won't bind to it. In this case, if you want to
use the generic driver (why would you?), you'll have to manually unbind
the hardware-specific driver and bind the generic driver, like this::

	echo -n vmbus-ed963694-e847-4b2a-85af-bc9cfc11d6f3 > /sys/bus/vmbus/drivers/hv_netvsc/unbind
	echo -n vmbus-ed963694-e847-4b2a-85af-bc9cfc11d6f3 > /sys/bus/vmbus/drivers/uio_hv_generic/bind

You can verify that the device has been bound to the driver by looking
for it in sysfs, for example like the following::

	ls -l /sys/bus/vmbus/devices/vmbus-ed963694-e847-4b2a-85af-bc9cfc11d6f3/driver

Which if successful should print::

	.../vmbus-ed963694-e847-4b2a-85af-bc9cfc11d6f3/driver -> ../../../bus/vmbus/drivers/uio_hv_generic
Things to know about uio_hv_generic
-----------------------------------

On each interrupt, uio_hv_generic sets the Interrupt Disable bit. This
prevents the device from generating further interrupts until the bit is
cleared. The userspace driver should clear this bit before blocking and
waiting for more interrupts.

Further information
===================

- `OSADL homepage. <http://www.osadl.org>`_

- `Linutronix homepage. <http://www.linutronix.de>`_
Documentation/extcon/intel-int3496.txt (new file, 22 lines)

@@ -0,0 +1,22 @@
Intel INT3496 ACPI device extcon driver documentation
-----------------------------------------------------

The Intel INT3496 ACPI device extcon driver is a driver for ACPI
devices with an acpi-id of INT3496, such as found, for example, on
Intel Baytrail and Cherrytrail tablets.

This ACPI device describes how the OS can read the id-pin of the device's
USB-otg port, as well as how it optionally can enable Vbus output on the
otg port and how it can optionally control the muxing of the data pins
between a USB host and a USB peripheral controller.

The ACPI device exposes this functionality by returning an array with up
to 3 gpio descriptors from its ACPI _CRS (Current Resource Settings) call:

Index 0: The input gpio for the id-pin, this is always present and valid
Index 1: The output gpio for enabling Vbus output from the device to the otg
         port, write 1 to enable the Vbus output (this gpio descriptor may
         be absent or invalid)
Index 2: The output gpio for muxing of the data pins between the USB host and
         the USB peripheral controller, write 1 to mux to the peripheral
         controller
@@ -22,7 +22,16 @@ To program the FPGA from a file or from a buffer:
 		struct fpga_image_info *info,
 		const char *buf, size_t count);
 
-Load the FPGA from an image which exists as a buffer in memory.
+Load the FPGA from an image which exists as a contiguous buffer in
+memory. Allocating contiguous kernel memory for the buffer should be avoided,
+users are encouraged to use the _sg interface instead of this.
+
+	int fpga_mgr_buf_load_sg(struct fpga_manager *mgr,
+		struct fpga_image_info *info,
+		struct sg_table *sgt);
+
+Load the FPGA from an image in non-contiguous memory. Callers can
+construct a sg_table using alloc_page backed memory.
 
 	int fpga_mgr_firmware_load(struct fpga_manager *mgr,
 		struct fpga_image_info *info,
@@ -166,7 +175,7 @@ success or negative error codes otherwise.
 
 The programming sequence is:
  1. .write_init
- 2. .write (may be called once or multiple times)
+ 2. .write or .write_sg (may be called once or multiple times)
  3. .write_complete
 
 The .write_init function will prepare the FPGA to receive the image data. The
@@ -176,7 +185,11 @@ buffer up at least this much before starting.
 
 The .write function writes a buffer to the FPGA. The buffer may contain the
 whole FPGA image or may be a smaller chunk of an FPGA image. In the latter
-case, this function is called multiple times for successive chunks.
+case, this function is called multiple times for successive chunks. This interface
+is suitable for drivers which use PIO.
+
+The .write_sg version behaves the same as .write except the input is a sg_table
+scatter list. This interface is suitable for drivers which use DMA.
 
 The .write_complete function is called after all the image has been written
 to put the FPGA into operating mode.
@@ -5993,6 +5993,7 @@ S:	Maintained
 F:	arch/x86/include/asm/mshyperv.h
 F:	arch/x86/include/uapi/asm/hyperv.h
 F:	arch/x86/kernel/cpu/mshyperv.c
+F:	arch/x86/hyperv
 F:	drivers/hid/hid-hyperv.c
 F:	drivers/hv/
 F:	drivers/input/serio/hyperv-keyboard.c
@@ -13071,7 +13072,7 @@ USERSPACE I/O (UIO)
 M:	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 S:	Maintained
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc.git
-F:	Documentation/DocBook/uio-howto.tmpl
+F:	Documentation/driver-api/uio-howto.rst
 F:	drivers/uio/
 F:	include/linux/uio*.h
@@ -557,15 +557,7 @@ static struct clk_lookup da850_clks[] = {
 	CLK("da830-mmc.0",	NULL,		&mmcsd0_clk),
 	CLK("da830-mmc.1",	NULL,		&mmcsd1_clk),
 	CLK("ti-aemif",		NULL,		&aemif_clk),
-	/*
-	 * The only user of this clock is davinci_nand and it get's it through
-	 * con_id. The nand node itself is created from within the aemif
-	 * driver to guarantee that it's probed after the aemif timing
-	 * parameters are configured. of_dev_auxdata is not accessible from
-	 * the aemif driver and can't be passed to of_platform_populate(). For
-	 * that reason we're leaving the dev_id here as NULL.
-	 */
-	CLK(NULL,		"aemif",	&aemif_nand_clk),
+	CLK("davinci-nand.0",	"aemif",	&aemif_nand_clk),
 	CLK("ohci-da8xx",	"usb11",	&usb11_clk),
 	CLK("musb-da8xx",	"usb20",	&usb20_clk),
 	CLK("spi_davinci.0",	NULL,		&spi0_clk),
@@ -11,6 +11,7 @@
 #include <linux/of_irq.h>
 #include <linux/of_platform.h>
 #include <linux/irqdomain.h>
+#include <linux/platform_data/ti-aemif.h>
 
 #include <asm/mach/arch.h>
 
@@ -18,6 +19,15 @@
 #include "cp_intc.h"
 #include <mach/da8xx.h>
 
+static struct of_dev_auxdata da850_aemif_auxdata_lookup[] = {
+	OF_DEV_AUXDATA("ti,davinci-nand", 0x62000000, "davinci-nand.0", NULL),
+	{}
+};
+
+static struct aemif_platform_data aemif_data = {
+	.dev_lookup = da850_aemif_auxdata_lookup,
+};
+
 static struct of_dev_auxdata da850_auxdata_lookup[] __initdata = {
 	OF_DEV_AUXDATA("ti,davinci-i2c", 0x01c22000, "i2c_davinci.1", NULL),
 	OF_DEV_AUXDATA("ti,davinci-i2c", 0x01e28000, "i2c_davinci.2", NULL),
@@ -37,7 +47,7 @@ static struct of_dev_auxdata da850_auxdata_lookup[] __initdata = {
 	OF_DEV_AUXDATA("ti,davinci-dm6467-emac", 0x01e20000, "davinci_emac.1",
 		       NULL),
 	OF_DEV_AUXDATA("ti,da830-mcasp-audio", 0x01d00000, "davinci-mcasp.0", NULL),
-	OF_DEV_AUXDATA("ti,da850-aemif", 0x68000000, "ti-aemif", NULL),
+	OF_DEV_AUXDATA("ti,da850-aemif", 0x68000000, "ti-aemif", &aemif_data),
 	OF_DEV_AUXDATA("ti,da850-tilcdc", 0x01e13000, "da8xx_lcdc.0", NULL),
 	OF_DEV_AUXDATA("ti,da830-ohci", 0x01e25000, "ohci-da8xx", NULL),
 	OF_DEV_AUXDATA("ti,da830-musb", 0x01e00000, "musb-da8xx", NULL),
@@ -7,6 +7,9 @@ obj-$(CONFIG_KVM) += kvm/
 # Xen paravirtualization support
 obj-$(CONFIG_XEN) += xen/
 
+# Hyper-V paravirtualization support
+obj-$(CONFIG_HYPERVISOR_GUEST) += hyperv/
+
 # lguest paravirtualization support
 obj-$(CONFIG_LGUEST_GUEST) += lguest/
arch/x86/hyperv/Makefile (new file, 1 line)

@@ -0,0 +1 @@
obj-y := hv_init.o
arch/x86/hyperv/hv_init.c (new file, 277 lines)

@@ -0,0 +1,277 @@
/*
 * X86 specific Hyper-V initialization code.
 *
 * Copyright (C) 2016, Microsoft, Inc.
 *
 * Author : K. Y. Srinivasan <kys@microsoft.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 */

#include <linux/types.h>
#include <asm/hypervisor.h>
#include <asm/hyperv.h>
#include <asm/mshyperv.h>
#include <linux/version.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/clockchips.h>


#ifdef CONFIG_X86_64

static struct ms_hyperv_tsc_page *tsc_pg;

static u64 read_hv_clock_tsc(struct clocksource *arg)
{
	u64 current_tick;

	if (tsc_pg->tsc_sequence != 0) {
		/*
		 * Use the tsc page to compute the value.
		 */

		while (1) {
			u64 tmp;
			u32 sequence = tsc_pg->tsc_sequence;
			u64 cur_tsc;
			u64 scale = tsc_pg->tsc_scale;
			s64 offset = tsc_pg->tsc_offset;

			rdtscll(cur_tsc);
			/* current_tick = ((cur_tsc * scale) >> 64) + offset */
			asm("mulq %3"
				: "=d" (current_tick), "=a" (tmp)
				: "a" (cur_tsc), "r" (scale));

			current_tick += offset;
			if (tsc_pg->tsc_sequence == sequence)
				return current_tick;

			if (tsc_pg->tsc_sequence != 0)
				continue;
			/*
			 * Fallback using MSR method.
			 */
			break;
		}
	}
	rdmsrl(HV_X64_MSR_TIME_REF_COUNT, current_tick);
	return current_tick;
}

static struct clocksource hyperv_cs_tsc = {
	.name		= "hyperv_clocksource_tsc_page",
	.rating		= 400,
	.read		= read_hv_clock_tsc,
	.mask		= CLOCKSOURCE_MASK(64),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};
#endif

static u64 read_hv_clock_msr(struct clocksource *arg)
{
	u64 current_tick;
	/*
	 * Read the partition counter to get the current tick count. This count
	 * is set to 0 when the partition is created and is incremented in
	 * 100 nanosecond units.
	 */
	rdmsrl(HV_X64_MSR_TIME_REF_COUNT, current_tick);
	return current_tick;
}

static struct clocksource hyperv_cs_msr = {
	.name		= "hyperv_clocksource_msr",
	.rating		= 400,
	.read		= read_hv_clock_msr,
	.mask		= CLOCKSOURCE_MASK(64),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};

static void *hypercall_pg;
struct clocksource *hyperv_cs;
EXPORT_SYMBOL_GPL(hyperv_cs);

/*
 * This function is to be invoked early in the boot sequence after the
 * hypervisor has been detected.
 *
 * 1. Setup the hypercall page.
 * 2. Register Hyper-V specific clocksource.
 */
void hyperv_init(void)
{
	u64 guest_id;
	union hv_x64_msr_hypercall_contents hypercall_msr;

	if (x86_hyper != &x86_hyper_ms_hyperv)
		return;

	/*
	 * Setup the hypercall page and enable hypercalls.
	 * 1. Register the guest ID
	 * 2. Enable the hypercall and register the hypercall page
	 */
	guest_id = generate_guest_id(0, LINUX_VERSION_CODE, 0);
	wrmsrl(HV_X64_MSR_GUEST_OS_ID, guest_id);

	hypercall_pg = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_RX);
	if (hypercall_pg == NULL) {
		wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0);
		return;
	}

	rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
	hypercall_msr.enable = 1;
	hypercall_msr.guest_physical_address = vmalloc_to_pfn(hypercall_pg);
	wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);

	/*
	 * Register Hyper-V specific clocksource.
	 */
#ifdef CONFIG_X86_64
	if (ms_hyperv.features & HV_X64_MSR_REFERENCE_TSC_AVAILABLE) {
		union hv_x64_msr_hypercall_contents tsc_msr;

		tsc_pg = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL);
		if (!tsc_pg)
			goto register_msr_cs;

		hyperv_cs = &hyperv_cs_tsc;

		rdmsrl(HV_X64_MSR_REFERENCE_TSC, tsc_msr.as_uint64);

		tsc_msr.enable = 1;
		tsc_msr.guest_physical_address = vmalloc_to_pfn(tsc_pg);

		wrmsrl(HV_X64_MSR_REFERENCE_TSC, tsc_msr.as_uint64);
		clocksource_register_hz(&hyperv_cs_tsc, NSEC_PER_SEC/100);
		return;
	}
#endif
	/*
	 * For 32 bit guests just use the MSR based mechanism for reading
	 * the partition counter.
	 */

register_msr_cs:
	hyperv_cs = &hyperv_cs_msr;
	if (ms_hyperv.features & HV_X64_MSR_TIME_REF_COUNT_AVAILABLE)
		clocksource_register_hz(&hyperv_cs_msr, NSEC_PER_SEC/100);
}

/*
 * This routine is called before kexec/kdump, it does the required cleanup.
 */
void hyperv_cleanup(void)
{
	union hv_x64_msr_hypercall_contents hypercall_msr;

	/* Reset our OS id */
	wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0);

	/* Reset the hypercall page */
	hypercall_msr.as_uint64 = 0;
	wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);

	/* Reset the TSC page */
	hypercall_msr.as_uint64 = 0;
	wrmsrl(HV_X64_MSR_REFERENCE_TSC, hypercall_msr.as_uint64);
}
EXPORT_SYMBOL_GPL(hyperv_cleanup);

/*
 * hv_do_hypercall - Invoke the specified hypercall
 */
u64 hv_do_hypercall(u64 control, void *input, void *output)
{
	u64 input_address = (input) ? virt_to_phys(input) : 0;
	u64 output_address = (output) ? virt_to_phys(output) : 0;
#ifdef CONFIG_X86_64
	u64 hv_status = 0;

	if (!hypercall_pg)
		return (u64)ULLONG_MAX;

	__asm__ __volatile__("mov %0, %%r8" : : "r" (output_address) : "r8");
	__asm__ __volatile__("call *%3" : "=a" (hv_status) :
			     "c" (control), "d" (input_address),
			     "m" (hypercall_pg));

	return hv_status;

#else

	u32 control_hi = control >> 32;
	u32 control_lo = control & 0xFFFFFFFF;
	u32 hv_status_hi = 1;
	u32 hv_status_lo = 1;
	u32 input_address_hi = input_address >> 32;
	u32 input_address_lo = input_address & 0xFFFFFFFF;
	u32 output_address_hi = output_address >> 32;
	u32 output_address_lo = output_address & 0xFFFFFFFF;

	if (!hypercall_pg)
		return (u64)ULLONG_MAX;

	__asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
			      "=a"(hv_status_lo) : "d" (control_hi),
			      "a" (control_lo), "b" (input_address_hi),
			      "c" (input_address_lo), "D"(output_address_hi),
			      "S"(output_address_lo), "m" (hypercall_pg));

	return hv_status_lo | ((u64)hv_status_hi << 32);
#endif /* !x86_64 */
}
EXPORT_SYMBOL_GPL(hv_do_hypercall);

void hyperv_report_panic(struct pt_regs *regs)
{
	static bool panic_reported;

	/*
	 * We prefer to report panic on 'die' chain as we have proper
	 * registers to report, but if we miss it (e.g. on BUG()) we need
	 * to report it on 'panic'.
	 */
	if (panic_reported)
		return;
	panic_reported = true;

	wrmsrl(HV_X64_MSR_CRASH_P0, regs->ip);
	wrmsrl(HV_X64_MSR_CRASH_P1, regs->ax);
	wrmsrl(HV_X64_MSR_CRASH_P2, regs->bx);
	wrmsrl(HV_X64_MSR_CRASH_P3, regs->cx);
	wrmsrl(HV_X64_MSR_CRASH_P4, regs->dx);

	/*
	 * Let Hyper-V know there is crash data available
	 */
	wrmsrl(HV_X64_MSR_CRASH_CTL, HV_CRASH_CTL_CRASH_NOTIFY);
}
EXPORT_SYMBOL_GPL(hyperv_report_panic);

bool hv_is_hypercall_page_setup(void)
{
	union hv_x64_msr_hypercall_contents hypercall_msr;

	/* Check if the hypercall page is setup */
	hypercall_msr.as_uint64 = 0;
	rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);

	if (!hypercall_msr.enable)
		return false;

	return true;
}
EXPORT_SYMBOL_GPL(hv_is_hypercall_page_setup);
@@ -3,8 +3,28 @@

 #include <linux/types.h>
 #include <linux/interrupt.h>
 #include <linux/clocksource.h>
 #include <asm/hyperv.h>

+/*
+ * The below CPUID leaves are present if VersionAndFeatures.HypervisorPresent
+ * is set by CPUID(HVCPUID_VERSION_FEATURES).
+ */
+enum hv_cpuid_function {
+	HVCPUID_VERSION_FEATURES		= 0x00000001,
+	HVCPUID_VENDOR_MAXFUNCTION		= 0x40000000,
+	HVCPUID_INTERFACE			= 0x40000001,
+
+	/*
+	 * The remaining functions depend on the value of
+	 * HVCPUID_INTERFACE
+	 */
+	HVCPUID_VERSION				= 0x40000002,
+	HVCPUID_FEATURES			= 0x40000003,
+	HVCPUID_ENLIGHTENMENT_INFO		= 0x40000004,
+	HVCPUID_IMPLEMENTATION_LIMITS		= 0x40000005,
+};
+
 struct ms_hyperv_info {
 	u32 features;
 	u32 misc_features;
@@ -13,6 +33,128 @@ struct ms_hyperv_info {

 extern struct ms_hyperv_info ms_hyperv;

+/*
+ * Declare the MSR used to setup pages used to communicate with the hypervisor.
+ */
+union hv_x64_msr_hypercall_contents {
+	u64 as_uint64;
+	struct {
+		u64 enable:1;
+		u64 reserved:11;
+		u64 guest_physical_address:52;
+	};
+};
+
+/*
+ * TSC page layout.
+ */
+struct ms_hyperv_tsc_page {
+	volatile u32 tsc_sequence;
+	u32 reserved1;
+	volatile u64 tsc_scale;
+	volatile s64 tsc_offset;
+	u64 reserved2[509];
+};
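The TSC page above is normally consumed with a sequence-count retry loop. A
minimal sketch of that read side follows (the helper name is invented;
mul_u64_u64_shr() is the kernel's multiply-and-shift helper from
linux/math64.h, and treating a zero sequence as "page invalid" is an
assumption here, not something this merge states):

	static u64 read_tsc_page_sketch(const struct ms_hyperv_tsc_page *tsc_pg)
	{
		u32 sequence;
		u64 scale, offset, tsc;

		do {
			sequence = READ_ONCE(tsc_pg->tsc_sequence);
			if (!sequence)
				return U64_MAX;	/* page invalid, caller falls back */
			smp_rmb();
			scale  = READ_ONCE(tsc_pg->tsc_scale);
			offset = READ_ONCE(tsc_pg->tsc_offset);
			tsc    = rdtsc_ordered();
			smp_rmb();
		} while (READ_ONCE(tsc_pg->tsc_sequence) != sequence);

		/* 64.64 fixed point: time = ((tsc * scale) >> 64) + offset */
		return mul_u64_u64_shr(tsc, scale, 64) + offset;
	}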
+/*
+ * The guest OS needs to register the guest ID with the hypervisor.
+ * The guest ID is a 64 bit entity and the structure of this ID is
+ * specified in the Hyper-V specification:
+ *
+ * msdn.microsoft.com/en-us/library/windows/hardware/ff542653%28v=vs.85%29.aspx
+ *
+ * While the current guideline does not specify how Linux guest ID(s)
+ * need to be generated, our plan is to publish the guidelines for
+ * Linux and other guest operating systems that currently are hosted
+ * on Hyper-V. The implementation here conforms to these yet
+ * unpublished guidelines.
+ *
+ * Bit(s)
+ * 63 - Indicates if the OS is Open Source or not; 1 is Open Source
+ * 62:56 - Os Type; Linux is 0x100
+ * 55:48 - Distro specific identification
+ * 47:16 - Linux kernel version number
+ * 15:0  - Distro specific identification
+ */
+
+#define HV_LINUX_VENDOR_ID	0x8100
+
+/*
+ * Generate the guest ID based on the guideline described above.
+ */
+static inline __u64 generate_guest_id(__u64 d_info1, __u64 kernel_version,
+				      __u64 d_info2)
+{
+	__u64 guest_id = 0;
+
+	guest_id = (((__u64)HV_LINUX_VENDOR_ID) << 48);
+	guest_id |= (d_info1 << 48);
+	guest_id |= (kernel_version << 16);
+	guest_id |= d_info2;
+
+	return guest_id;
+}
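As a worked example of the layout above (a standalone sketch, not part of the
merge; 0x040b00 is LINUX_VERSION_CODE for a hypothetical 4.11.0 kernel and
both distro fields are left zero):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t id = ((uint64_t)0x8100 << 48)		/* HV_LINUX_VENDOR_ID */
			    | ((uint64_t)0x040b00 << 16);	/* kernel version */

		printf("open source bit: %llu\n",
		       (unsigned long long)(id >> 63));			/* 1 */
		printf("kernel version:  0x%llx\n",
		       (unsigned long long)((id >> 16) & 0xFFFFFFFF));	/* 0x40b00 */
		return 0;
	}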
+/* Free the message slot and signal end-of-message if required */
+static inline void vmbus_signal_eom(struct hv_message *msg, u32 old_msg_type)
+{
+	/*
+	 * On crash we're reading some other CPU's message page and we need
+	 * to be careful: this other CPU may already have cleared the header
+	 * and the host may already have delivered some other message there.
+	 * In case we blindly write msg->header.message_type we're going
+	 * to lose it. We can still lose a message of the same type but
+	 * we count on the fact that there can only be one
+	 * CHANNELMSG_UNLOAD_RESPONSE and we don't care about other messages
+	 * on crash.
+	 */
+	if (cmpxchg(&msg->header.message_type, old_msg_type,
+		    HVMSG_NONE) != old_msg_type)
+		return;
+
+	/*
+	 * Make sure the write to MessageType (ie set to
+	 * HVMSG_NONE) happens before we read the
+	 * MessagePending and EOMing. Otherwise, the EOMing
+	 * will not deliver any more messages since there is
+	 * no empty slot.
+	 */
+	mb();
+
+	if (msg->header.message_flags.msg_pending) {
+		/*
+		 * This will cause message queue rescan to
+		 * possibly deliver another msg from the
+		 * hypervisor
+		 */
+		wrmsrl(HV_X64_MSR_EOM, 0);
+	}
+}
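The cmpxchg() above implements a claim-or-skip pattern: only the CPU that
still observes the message type it originally read may clear the slot, so a
message the host has meanwhile re-delivered is never clobbered. A minimal
userspace illustration of the same idea (C11 atomics, names invented):

	#include <stdatomic.h>

	enum { MSG_NONE = 0 };

	/* Returns 1 if we freed the slot, 0 if someone got there first */
	static int try_free_slot(_Atomic unsigned int *slot,
				 unsigned int seen_type)
	{
		unsigned int expected = seen_type;

		/* On failure the slot keeps whatever was written meanwhile */
		return atomic_compare_exchange_strong(slot, &expected, MSG_NONE);
	}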
+#define hv_get_current_tick(tick) rdmsrl(HV_X64_MSR_TIME_REF_COUNT, tick)
+#define hv_init_timer(timer, tick) wrmsrl(timer, tick)
+#define hv_init_timer_config(config, val) wrmsrl(config, val)
+
+#define hv_get_simp(val) rdmsrl(HV_X64_MSR_SIMP, val)
+#define hv_set_simp(val) wrmsrl(HV_X64_MSR_SIMP, val)
+
+#define hv_get_siefp(val) rdmsrl(HV_X64_MSR_SIEFP, val)
+#define hv_set_siefp(val) wrmsrl(HV_X64_MSR_SIEFP, val)
+
+#define hv_get_synic_state(val) rdmsrl(HV_X64_MSR_SCONTROL, val)
+#define hv_set_synic_state(val) wrmsrl(HV_X64_MSR_SCONTROL, val)
+
+#define hv_get_vp_index(index) rdmsrl(HV_X64_MSR_VP_INDEX, index)
+
+#define hv_get_synint_state(int_num, val) rdmsrl(int_num, val)
+#define hv_set_synint_state(int_num, val) wrmsrl(int_num, val)
+
 void hyperv_callback_vector(void);
 #ifdef CONFIG_TRACING
 #define trace_hyperv_callback_vector hyperv_callback_vector
@@ -25,4 +167,13 @@ void hv_setup_kexec_handler(void (*handler)(void));
 void hv_remove_kexec_handler(void);
 void hv_setup_crash_handler(void (*handler)(struct pt_regs *regs));
 void hv_remove_crash_handler(void);
+
+#if IS_ENABLED(CONFIG_HYPERV)
+extern struct clocksource *hyperv_cs;
+
+void hyperv_init(void);
+void hyperv_report_panic(struct pt_regs *regs);
+bool hv_is_hypercall_page_setup(void);
+void hyperv_cleanup(void);
+#endif
 #endif
@@ -73,6 +73,9 @@
  */
 #define HV_X64_MSR_STAT_PAGES_AVAILABLE	(1 << 8)

+/* Crash MSR available */
+#define HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE	(1 << 10)
+
 /*
  * Feature identification: EBX indicates which flags were specified at
  * partition creation. The format is the same as the partition creation
@@ -144,6 +147,11 @@
  */
 #define HV_X64_RELAXED_TIMING_RECOMMENDED	(1 << 5)

+/*
+ * Crash notification flag.
+ */
+#define HV_CRASH_CTL_CRASH_NOTIFY	(1ULL << 63)
+
 /* MSR used to identify the guest OS. */
 #define HV_X64_MSR_GUEST_OS_ID	0x40000000
@@ -133,26 +133,6 @@ static uint32_t __init ms_hyperv_platform(void)
 	return 0;
 }

-static u64 read_hv_clock(struct clocksource *arg)
-{
-	u64 current_tick;
-	/*
-	 * Read the partition counter to get the current tick count. This count
-	 * is set to 0 when the partition is created and is incremented in
-	 * 100 nanosecond units.
-	 */
-	rdmsrl(HV_X64_MSR_TIME_REF_COUNT, current_tick);
-	return current_tick;
-}
-
-static struct clocksource hyperv_cs = {
-	.name		= "hyperv_clocksource",
-	.rating		= 400, /* use this when running on Hyperv */
-	.read		= read_hv_clock,
-	.mask		= CLOCKSOURCE_MASK(64),
-	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
-};
-
 static unsigned char hv_get_nmi_reason(void)
 {
 	return 0;
@@ -180,6 +160,11 @@ static int hv_nmi_unknown(unsigned int val, struct pt_regs *regs)

 static void __init ms_hyperv_init_platform(void)
 {
+	int hv_host_info_eax;
+	int hv_host_info_ebx;
+	int hv_host_info_ecx;
+	int hv_host_info_edx;
+
 	/*
 	 * Extract the features and hints
 	 */
@@ -190,6 +175,21 @@ static void __init ms_hyperv_init_platform(void)
 	pr_info("HyperV: features 0x%x, hints 0x%x\n",
 		ms_hyperv.features, ms_hyperv.hints);

+	/*
+	 * Extract host information.
+	 */
+	if (cpuid_eax(HVCPUID_VENDOR_MAXFUNCTION) >= HVCPUID_VERSION) {
+		hv_host_info_eax = cpuid_eax(HVCPUID_VERSION);
+		hv_host_info_ebx = cpuid_ebx(HVCPUID_VERSION);
+		hv_host_info_ecx = cpuid_ecx(HVCPUID_VERSION);
+		hv_host_info_edx = cpuid_edx(HVCPUID_VERSION);
+
+		pr_info("Hyper-V Host Build:%d-%d.%d-%d-%d.%d\n",
+			hv_host_info_eax, hv_host_info_ebx >> 16,
+			hv_host_info_ebx & 0xFFFF, hv_host_info_ecx,
+			hv_host_info_edx >> 24, hv_host_info_edx & 0xFFFFFF);
+	}
+
 #ifdef CONFIG_X86_LOCAL_APIC
 	if (ms_hyperv.features & HV_X64_MSR_APIC_FREQUENCY_AVAILABLE) {
 		/*
@@ -208,9 +208,6 @@ static void __init ms_hyperv_init_platform(void)
 			 "hv_nmi_unknown");
 #endif

-	if (ms_hyperv.features & HV_X64_MSR_TIME_REF_COUNT_AVAILABLE)
-		clocksource_register_hz(&hyperv_cs, NSEC_PER_SEC/100);
-
 #ifdef CONFIG_X86_IO_APIC
 	no_timer_check = 1;
 #endif
@@ -227,6 +224,13 @@ static void __init ms_hyperv_init_platform(void)
 	 */
 	if (efi_enabled(EFI_BOOT))
 		x86_platform.get_nmi_reason = hv_get_nmi_reason;
+
+#if IS_ENABLED(CONFIG_HYPERV)
+	/*
+	 * Setup the hook to get control post apic initialization.
+	 */
+	x86_platform.apic_post_init = hyperv_init;
+#endif
 }

 const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = {
@@ -42,10 +42,22 @@ static struct resource goldfish_pdev_bus_resources[] = {
 	}
 };

+static bool goldfish_enable __initdata;
+
+static int __init goldfish_setup(char *str)
+{
+	goldfish_enable = true;
+	return 0;
+}
+__setup("goldfish", goldfish_setup);
+
 static int __init goldfish_init(void)
 {
+	if (!goldfish_enable)
+		return -ENODEV;
+
 	platform_device_register_simple("goldfish_pdev_bus", -1,
 					goldfish_pdev_bus_resources, 2);
 	return 0;
 }
 device_initcall(goldfish_init);
@@ -202,4 +202,6 @@ source "drivers/hwtracing/intel_th/Kconfig"

 source "drivers/fpga/Kconfig"

+source "drivers/fsi/Kconfig"
+
 endmenu
@@ -173,3 +173,4 @@ obj-$(CONFIG_STM)		+= hwtracing/stm/
 obj-$(CONFIG_ANDROID)		+= android/
 obj-$(CONFIG_NVMEM)		+= nvmem/
 obj-$(CONFIG_FPGA)		+= fpga/
+obj-$(CONFIG_FSI)		+= fsi/
@@ -19,6 +19,18 @@ config ANDROID_BINDER_IPC
 	  Android process, using Binder to identify, invoke and pass arguments
 	  between said processes.

+config ANDROID_BINDER_DEVICES
+	string "Android Binder devices"
+	depends on ANDROID_BINDER_IPC
+	default "binder"
+	---help---
+	  Default value for the binder.devices parameter.
+
+	  The binder.devices parameter is a comma-separated list of strings
+	  that specifies the names of the binder device nodes that will be
+	  created. Each binder device has its own context manager, and is
+	  therefore logically separated from the other devices.
+
 config ANDROID_BINDER_IPC_32BIT
 	bool
 	depends on !64BIT && ANDROID_BINDER_IPC
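For illustration, the parameter that ANDROID_BINDER_DEVICES seeds can also
be given on the kernel command line; the extra device names below are the
ones commonly used on Android, not anything this option mandates:

	# One context manager per device node under /dev
	binder.devices=binder,hwbinder,vndbinder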
File diff suppressed because it is too large
@@ -56,14 +56,16 @@
 #define HT16K33_FB_SIZE	(HT16K33_MATRIX_LED_MAX_COLS * BYTES_PER_ROW)

 struct ht16k33_keypad {
+	struct i2c_client *client;
 	struct input_dev *dev;
-	spinlock_t lock;
-	struct delayed_work work;
 	uint32_t cols;
 	uint32_t rows;
 	uint32_t row_shift;
 	uint32_t debounce_ms;
 	uint16_t last_key_state[HT16K33_MATRIX_KEYPAD_MAX_COLS];
+
+	wait_queue_head_t wait;
+	bool stopped;
 };

 struct ht16k33_fbdev {
@@ -78,7 +80,6 @@ struct ht16k33_priv {
 	struct i2c_client *client;
 	struct ht16k33_keypad keypad;
 	struct ht16k33_fbdev fbdev;
-	struct workqueue_struct *workqueue;
 };

 static struct fb_fix_screeninfo ht16k33_fb_fix = {
@@ -124,16 +125,8 @@ static void ht16k33_fb_queue(struct ht16k33_priv *priv)
 {
 	struct ht16k33_fbdev *fbdev = &priv->fbdev;

-	queue_delayed_work(priv->workqueue, &fbdev->work,
-			   msecs_to_jiffies(HZ / fbdev->refresh_rate));
-}
-
-static void ht16k33_keypad_queue(struct ht16k33_priv *priv)
-{
-	struct ht16k33_keypad *keypad = &priv->keypad;
-
-	queue_delayed_work(priv->workqueue, &keypad->work,
-			   msecs_to_jiffies(keypad->debounce_ms));
+	schedule_delayed_work(&fbdev->work,
+			      msecs_to_jiffies(HZ / fbdev->refresh_rate));
 }

 /*
@@ -182,32 +175,6 @@ requeue:
 	ht16k33_fb_queue(priv);
 }

-static int ht16k33_keypad_start(struct input_dev *dev)
-{
-	struct ht16k33_priv *priv = input_get_drvdata(dev);
-	struct ht16k33_keypad *keypad = &priv->keypad;
-
-	/*
-	 * Schedule an immediate key scan to capture current key state;
-	 * columns will be activated and IRQs be enabled after the scan.
-	 */
-	queue_delayed_work(priv->workqueue, &keypad->work, 0);
-	return 0;
-}
-
-static void ht16k33_keypad_stop(struct input_dev *dev)
-{
-	struct ht16k33_priv *priv = input_get_drvdata(dev);
-	struct ht16k33_keypad *keypad = &priv->keypad;
-
-	cancel_delayed_work(&keypad->work);
-	/*
-	 * ht16k33_keypad_scan() will leave IRQs enabled;
-	 * we should disable them now.
-	 */
-	disable_irq_nosync(priv->client->irq);
-}
-
 static int ht16k33_initialize(struct ht16k33_priv *priv)
 {
 	uint8_t byte;
@@ -233,61 +200,6 @@ static int ht16k33_initialize(struct ht16k33_priv *priv)
 	return i2c_smbus_write_byte(priv->client, byte);
 }

-/*
- * This gets the keys from keypad and reports it to input subsystem
- */
-static void ht16k33_keypad_scan(struct work_struct *work)
-{
-	struct ht16k33_keypad *keypad =
-		container_of(work, struct ht16k33_keypad, work.work);
-	struct ht16k33_priv *priv =
-		container_of(keypad, struct ht16k33_priv, keypad);
-	const unsigned short *keycodes = keypad->dev->keycode;
-	uint16_t bits_changed, new_state[HT16K33_MATRIX_KEYPAD_MAX_COLS];
-	uint8_t data[HT16K33_MATRIX_KEYPAD_MAX_COLS * 2];
-	int row, col, code;
-	bool reschedule = false;
-
-	if (i2c_smbus_read_i2c_block_data(priv->client, 0x40, 6, data) != 6) {
-		dev_err(&priv->client->dev, "Failed to read key data\n");
-		goto end;
-	}
-
-	for (col = 0; col < keypad->cols; col++) {
-		new_state[col] = (data[col * 2 + 1] << 8) | data[col * 2];
-		if (new_state[col])
-			reschedule = true;
-		bits_changed = keypad->last_key_state[col] ^ new_state[col];
-
-		while (bits_changed) {
-			row = ffs(bits_changed) - 1;
-			code = MATRIX_SCAN_CODE(row, col, keypad->row_shift);
-			input_event(keypad->dev, EV_MSC, MSC_SCAN, code);
-			input_report_key(keypad->dev, keycodes[code],
-					 new_state[col] & BIT(row));
-			bits_changed &= ~BIT(row);
-		}
-	}
-	input_sync(keypad->dev);
-	memcpy(keypad->last_key_state, new_state, sizeof(new_state));
-
-end:
-	if (reschedule)
-		ht16k33_keypad_queue(priv);
-	else
-		enable_irq(priv->client->irq);
-}
-
-static irqreturn_t ht16k33_irq_thread(int irq, void *dev)
-{
-	struct ht16k33_priv *priv = dev;
-
-	disable_irq_nosync(priv->client->irq);
-	ht16k33_keypad_queue(priv);
-
-	return IRQ_HANDLED;
-}
-
 static int ht16k33_bl_update_status(struct backlight_device *bl)
 {
 	int brightness = bl->props.brightness;
@@ -334,15 +246,152 @@ static struct fb_ops ht16k33_fb_ops = {
 	.fb_mmap = ht16k33_mmap,
 };

+/*
+ * This gets the keys from keypad and reports it to input subsystem.
+ * Returns true if a key is pressed.
+ */
+static bool ht16k33_keypad_scan(struct ht16k33_keypad *keypad)
+{
+	const unsigned short *keycodes = keypad->dev->keycode;
+	u16 new_state[HT16K33_MATRIX_KEYPAD_MAX_COLS];
+	u8 data[HT16K33_MATRIX_KEYPAD_MAX_COLS * 2];
+	unsigned long bits_changed;
+	int row, col, code;
+	bool pressed = false;
+
+	if (i2c_smbus_read_i2c_block_data(keypad->client, 0x40, 6, data) != 6) {
+		dev_err(&keypad->client->dev, "Failed to read key data\n");
+		return false;
+	}
+
+	for (col = 0; col < keypad->cols; col++) {
+		new_state[col] = (data[col * 2 + 1] << 8) | data[col * 2];
+		if (new_state[col])
+			pressed = true;
+		bits_changed = keypad->last_key_state[col] ^ new_state[col];
+
+		for_each_set_bit(row, &bits_changed, BITS_PER_LONG) {
+			code = MATRIX_SCAN_CODE(row, col, keypad->row_shift);
+			input_event(keypad->dev, EV_MSC, MSC_SCAN, code);
+			input_report_key(keypad->dev, keycodes[code],
+					 new_state[col] & BIT(row));
+		}
+	}
+	input_sync(keypad->dev);
+	memcpy(keypad->last_key_state, new_state, sizeof(new_state));
+
+	return pressed;
+}
+
+static irqreturn_t ht16k33_keypad_irq_thread(int irq, void *dev)
+{
+	struct ht16k33_keypad *keypad = dev;
+
+	do {
+		wait_event_timeout(keypad->wait, keypad->stopped,
+				   msecs_to_jiffies(keypad->debounce_ms));
+		if (keypad->stopped)
+			break;
+	} while (ht16k33_keypad_scan(keypad));
+
+	return IRQ_HANDLED;
+}
+
+static int ht16k33_keypad_start(struct input_dev *dev)
+{
+	struct ht16k33_keypad *keypad = input_get_drvdata(dev);
+
+	keypad->stopped = false;
+	mb();
+	enable_irq(keypad->client->irq);
+
+	return 0;
+}
+
+static void ht16k33_keypad_stop(struct input_dev *dev)
+{
+	struct ht16k33_keypad *keypad = input_get_drvdata(dev);
+
+	keypad->stopped = true;
+	mb();
+	wake_up(&keypad->wait);
+	disable_irq(keypad->client->irq);
+}
+
+static int ht16k33_keypad_probe(struct i2c_client *client,
+				struct ht16k33_keypad *keypad)
+{
+	struct device_node *node = client->dev.of_node;
+	u32 rows = HT16K33_MATRIX_KEYPAD_MAX_ROWS;
+	u32 cols = HT16K33_MATRIX_KEYPAD_MAX_COLS;
+	int err;
+
+	keypad->client = client;
+	init_waitqueue_head(&keypad->wait);
+
+	keypad->dev = devm_input_allocate_device(&client->dev);
+	if (!keypad->dev)
+		return -ENOMEM;
+
+	input_set_drvdata(keypad->dev, keypad);
+
+	keypad->dev->name = DRIVER_NAME"-keypad";
+	keypad->dev->id.bustype = BUS_I2C;
+	keypad->dev->open = ht16k33_keypad_start;
+	keypad->dev->close = ht16k33_keypad_stop;
+
+	if (!of_get_property(node, "linux,no-autorepeat", NULL))
+		__set_bit(EV_REP, keypad->dev->evbit);
+
+	err = of_property_read_u32(node, "debounce-delay-ms",
+				   &keypad->debounce_ms);
+	if (err) {
+		dev_err(&client->dev, "key debounce delay not specified\n");
+		return err;
+	}
+
+	err = matrix_keypad_parse_of_params(&client->dev, &rows, &cols);
+	if (err)
+		return err;
+
+	keypad->rows = rows;
+	keypad->cols = cols;
+	keypad->row_shift = get_count_order(cols);
+
+	err = matrix_keypad_build_keymap(NULL, NULL, rows, cols, NULL,
+					 keypad->dev);
+	if (err) {
+		dev_err(&client->dev, "failed to build keymap\n");
+		return err;
+	}
+
+	err = devm_request_threaded_irq(&client->dev, client->irq,
+					NULL, ht16k33_keypad_irq_thread,
+					IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+					DRIVER_NAME, keypad);
+	if (err) {
+		dev_err(&client->dev, "irq request failed %d, error %d\n",
+			client->irq, err);
+		return err;
+	}
+
+	ht16k33_keypad_stop(keypad->dev);
+
+	err = input_register_device(keypad->dev);
+	if (err)
+		return err;
+
+	return 0;
+}
+
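For reference, the scan-code packing used by MATRIX_SCAN_CODE() in
ht16k33_keypad_scan() above gives each row a power-of-two block of codes,
which is why row_shift is get_count_order(cols). A standalone sketch (macro
definition mirrors include/linux/input/matrix_keypad.h):

	#include <stdio.h>

	#define MATRIX_SCAN_CODE(row, col, row_shift) \
		(((row) << (row_shift)) + (col))

	int main(void)
	{
		int row_shift = 4;	/* get_count_order(16) for 16 columns */

		printf("%d\n", MATRIX_SCAN_CODE(0, 3, row_shift));	/* 3 */
		printf("%d\n", MATRIX_SCAN_CODE(2, 5, row_shift));	/* 37 */
		return 0;
	}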
 static int ht16k33_probe(struct i2c_client *client,
 			 const struct i2c_device_id *id)
 {
 	int err;
-	uint32_t rows, cols, dft_brightness;
+	uint32_t dft_brightness;
 	struct backlight_device *bl;
 	struct backlight_properties bl_props;
 	struct ht16k33_priv *priv;
-	struct ht16k33_keypad *keypad;
 	struct ht16k33_fbdev *fbdev;
 	struct device_node *node = client->dev.of_node;

@@ -363,23 +412,16 @@ static int ht16k33_probe(struct i2c_client *client,
 	priv->client = client;
 	i2c_set_clientdata(client, priv);
 	fbdev = &priv->fbdev;
-	keypad = &priv->keypad;
-
-	priv->workqueue = create_singlethread_workqueue(DRIVER_NAME "-wq");
-	if (priv->workqueue == NULL)
-		return -ENOMEM;

 	err = ht16k33_initialize(priv);
 	if (err)
-		goto err_destroy_wq;
+		return err;

 	/* Framebuffer (2 bytes per column) */
 	BUILD_BUG_ON(PAGE_SIZE < HT16K33_FB_SIZE);
 	fbdev->buffer = (unsigned char *) get_zeroed_page(GFP_KERNEL);
-	if (!fbdev->buffer) {
-		err = -ENOMEM;
-		goto err_free_fbdev;
-	}
+	if (!fbdev->buffer)
+		return -ENOMEM;

 	fbdev->cache = devm_kmalloc(&client->dev, HT16K33_FB_SIZE, GFP_KERNEL);
 	if (!fbdev->cache) {
@@ -415,59 +457,7 @@ static int ht16k33_probe(struct i2c_client *client,
 	if (err)
 		goto err_fbdev_info;

-	/* Keypad */
-	keypad->dev = devm_input_allocate_device(&client->dev);
-	if (!keypad->dev) {
-		err = -ENOMEM;
-		goto err_fbdev_unregister;
-	}
-
-	keypad->dev->name = DRIVER_NAME"-keypad";
-	keypad->dev->id.bustype = BUS_I2C;
-	keypad->dev->open = ht16k33_keypad_start;
-	keypad->dev->close = ht16k33_keypad_stop;
-
-	if (!of_get_property(node, "linux,no-autorepeat", NULL))
-		__set_bit(EV_REP, keypad->dev->evbit);
-
-	err = of_property_read_u32(node, "debounce-delay-ms",
-				   &keypad->debounce_ms);
-	if (err) {
-		dev_err(&client->dev, "key debounce delay not specified\n");
-		goto err_fbdev_unregister;
-	}
-
-	err = devm_request_threaded_irq(&client->dev, client->irq, NULL,
-					ht16k33_irq_thread,
-					IRQF_TRIGGER_RISING | IRQF_ONESHOT,
-					DRIVER_NAME, priv);
-	if (err) {
-		dev_err(&client->dev, "irq request failed %d, error %d\n",
-			client->irq, err);
-		goto err_fbdev_unregister;
-	}
-
-	disable_irq_nosync(client->irq);
-	rows = HT16K33_MATRIX_KEYPAD_MAX_ROWS;
-	cols = HT16K33_MATRIX_KEYPAD_MAX_COLS;
-	err = matrix_keypad_parse_of_params(&client->dev, &rows, &cols);
-	if (err)
-		goto err_fbdev_unregister;
-
-	err = matrix_keypad_build_keymap(NULL, NULL, rows, cols, NULL,
-					 keypad->dev);
-	if (err) {
-		dev_err(&client->dev, "failed to build keymap\n");
-		goto err_fbdev_unregister;
-	}
-
-	input_set_drvdata(keypad->dev, priv);
-	keypad->rows = rows;
-	keypad->cols = cols;
-	keypad->row_shift = get_count_order(cols);
-	INIT_DELAYED_WORK(&keypad->work, ht16k33_keypad_scan);
-
-	err = input_register_device(keypad->dev);
+	err = ht16k33_keypad_probe(client, &priv->keypad);
 	if (err)
 		goto err_fbdev_unregister;

@@ -482,7 +472,7 @@ static int ht16k33_probe(struct i2c_client *client,
 	if (IS_ERR(bl)) {
 		dev_err(&client->dev, "failed to register backlight\n");
 		err = PTR_ERR(bl);
-		goto err_keypad_unregister;
+		goto err_fbdev_unregister;
 	}

 	err = of_property_read_u32(node, "default-brightness-level",
@@ -502,18 +492,12 @@ static int ht16k33_probe(struct i2c_client *client,
 	ht16k33_fb_queue(priv);
 	return 0;

-err_keypad_unregister:
-	input_unregister_device(keypad->dev);
 err_fbdev_unregister:
 	unregister_framebuffer(fbdev->info);
 err_fbdev_info:
 	framebuffer_release(fbdev->info);
 err_fbdev_buffer:
 	free_page((unsigned long) fbdev->buffer);
-err_free_fbdev:
-	kfree(fbdev);
-err_destroy_wq:
-	destroy_workqueue(priv->workqueue);

 	return err;
 }
@@ -521,17 +505,13 @@ err_destroy_wq:
 static int ht16k33_remove(struct i2c_client *client)
 {
 	struct ht16k33_priv *priv = i2c_get_clientdata(client);
-	struct ht16k33_keypad *keypad = &priv->keypad;
 	struct ht16k33_fbdev *fbdev = &priv->fbdev;

-	ht16k33_keypad_stop(keypad->dev);
-
 	cancel_delayed_work(&fbdev->work);
 	unregister_framebuffer(fbdev->info);
 	framebuffer_release(fbdev->info);
 	free_page((unsigned long) fbdev->buffer);
-
-	destroy_workqueue(priv->workqueue);
 	return 0;
 }
@@ -571,9 +571,12 @@ config TELCLOCK
 	  controlling the behavior of this hardware.

 config DEVPORT
-	bool
+	bool "/dev/port character device"
 	depends on ISA || PCI
 	default y
+	help
+	  Say Y here if you want to support the /dev/port device. The /dev/port
+	  device is similar to /dev/mem, but for I/O ports.

 source "drivers/s390/char/Kconfig"
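As a usage aside (not from the patch): /dev/port exposes the x86 I/O port
space as a file whose offset is the port number, so a byte-wide read can be
done from userspace like this (needs root):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		unsigned char val;
		int fd = open("/dev/port", O_RDONLY);

		/* offset 0x61 = legacy PC system control port */
		if (fd < 0 || pread(fd, &val, 1, 0x61) != 1) {
			perror("/dev/port");
			return 1;
		}
		printf("port 0x61 = 0x%02x\n", val);
		close(fd);
		return 0;
	}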
@@ -31,13 +31,6 @@
 #include <linux/kthread.h>
 #include <linux/delay.h>

-/*
- * The apm_bios device is one of the misc char devices.
- * This is its minor number.
- */
-#define APM_MINOR_DEV	134
-
 /*
  * One option can be changed at boot time as follows:
  *	apm=on/off			enable/disable APM
@@ -17,7 +17,6 @@
 #include <linux/init.h>
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/miscdevice.h>
 #include <linux/delay.h>
 #include <linux/bcd.h>
 #include <linux/mutex.h>
@@ -463,9 +463,9 @@ static int mmtimer_mmap(struct file *file, struct vm_area_struct *vma)
 }

 static struct miscdevice mmtimer_miscdev = {
-	SGI_MMTIMER,
-	MMTIMER_NAME,
-	&mmtimer_fops
+	.minor = SGI_MMTIMER,
+	.name = MMTIMER_NAME,
+	.fops = &mmtimer_fops
 };

 static struct timespec sgi_clock_offset;
@@ -269,7 +269,6 @@ int buffer_icap_set_configuration(struct hwicap_drvdata *drvdata, u32 *data,
 {
 	int status;
 	s32 buffer_count = 0;
-	s32 num_writes = 0;
 	bool dirty = false;
 	u32 i;
 	void __iomem *base_address = drvdata->base_address;
@@ -298,7 +297,6 @@ int buffer_icap_set_configuration(struct hwicap_drvdata *drvdata, u32 *data,
 		}

 		buffer_count = 0;
-		num_writes++;
 		dirty = false;
 	}

@@ -328,7 +326,6 @@ int buffer_icap_get_configuration(struct hwicap_drvdata *drvdata, u32 *data,
 {
 	int status;
 	s32 buffer_count = 0;
-	s32 read_count = 0;
 	u32 i;
 	void __iomem *base_address = drvdata->base_address;

@@ -353,7 +350,6 @@ int buffer_icap_get_configuration(struct hwicap_drvdata *drvdata, u32 *data,
 		}

 		buffer_count = 0;
-		read_count++;
 	}

 	/* Copy data from bram */
@@ -42,6 +42,16 @@ config EXTCON_GPIO
 	  Say Y here to enable GPIO based extcon support. Note that GPIO
 	  extcon supports single state per extcon instance.

+config EXTCON_INTEL_INT3496
+	tristate "Intel INT3496 ACPI device extcon driver"
+	depends on GPIOLIB && ACPI
+	help
+	  Say Y here to enable extcon support for USB OTG ports controlled by
+	  an Intel INT3496 ACPI device.
+
+	  This ACPI device is typically found on Intel Baytrail or Cherrytrail
+	  based tablets, or other Baytrail / Cherrytrail devices.
+
 config EXTCON_MAX14577
 	tristate "Maxim MAX14577/77836 EXTCON Support"
 	depends on MFD_MAX14577
@@ -8,6 +8,7 @@ obj-$(CONFIG_EXTCON_ADC_JACK)	+= extcon-adc-jack.o
 obj-$(CONFIG_EXTCON_ARIZONA)	+= extcon-arizona.o
 obj-$(CONFIG_EXTCON_AXP288)	+= extcon-axp288.o
 obj-$(CONFIG_EXTCON_GPIO)	+= extcon-gpio.o
+obj-$(CONFIG_EXTCON_INTEL_INT3496) += extcon-intel-int3496.o
 obj-$(CONFIG_EXTCON_MAX14577)	+= extcon-max14577.o
 obj-$(CONFIG_EXTCON_MAX3355)	+= extcon-max3355.o
 obj-$(CONFIG_EXTCON_MAX77693)	+= extcon-max77693.o
@@ -14,7 +14,7 @@
  * GNU General Public License for more details.
  */

-#include <linux/extcon.h>
+#include "extcon.h"

 static int devm_extcon_dev_match(struct device *dev, void *res, void *data)
 {
@@ -67,7 +67,7 @@ static void adc_jack_handler(struct work_struct *work)

 	ret = iio_read_channel_raw(data->chan, &adc_val);
 	if (ret < 0) {
-		dev_err(&data->edev->dev, "read channel() error: %d\n", ret);
+		dev_err(data->dev, "read channel() error: %d\n", ret);
 		return;
 	}
@@ -236,12 +236,8 @@ static void arizona_extcon_set_mode(struct arizona_extcon_info *info, int mode)

 	mode %= info->micd_num_modes;

-	if (arizona->pdata.micd_pol_gpio > 0)
-		gpio_set_value_cansleep(arizona->pdata.micd_pol_gpio,
-					info->micd_modes[mode].gpio);
-	else
-		gpiod_set_value_cansleep(info->micd_pol_gpio,
-					 info->micd_modes[mode].gpio);
+	gpiod_set_value_cansleep(info->micd_pol_gpio,
+				 info->micd_modes[mode].gpio);

 	regmap_update_bits(arizona->regmap, ARIZONA_MIC_DETECT_1,
 			   ARIZONA_MICD_BIAS_SRC_MASK,
@@ -1412,21 +1408,21 @@ static int arizona_extcon_probe(struct platform_device *pdev)
 	regmap_update_bits(arizona->regmap, ARIZONA_GP_SWITCH_1,
 			   ARIZONA_SW1_MODE_MASK, arizona->pdata.gpsw);

-	if (arizona->pdata.micd_pol_gpio > 0) {
+	if (pdata->micd_pol_gpio > 0) {
 		if (info->micd_modes[0].gpio)
 			mode = GPIOF_OUT_INIT_HIGH;
 		else
 			mode = GPIOF_OUT_INIT_LOW;

-		ret = devm_gpio_request_one(&pdev->dev,
-					    arizona->pdata.micd_pol_gpio,
-					    mode,
-					    "MICD polarity");
+		ret = devm_gpio_request_one(&pdev->dev, pdata->micd_pol_gpio,
+					    mode, "MICD polarity");
 		if (ret != 0) {
 			dev_err(arizona->dev, "Failed to request GPIO%d: %d\n",
-				arizona->pdata.micd_pol_gpio, ret);
+				pdata->micd_pol_gpio, ret);
 			goto err_register;
 		}

+		info->micd_pol_gpio = gpio_to_desc(pdata->micd_pol_gpio);
 	} else {
 		if (info->micd_modes[0].gpio)
 			mode = GPIOD_OUT_HIGH;
@@ -21,7 +21,6 @@
 #include <linux/interrupt.h>
 #include <linux/platform_device.h>
 #include <linux/property.h>
-#include <linux/usb/phy.h>
 #include <linux/notifier.h>
 #include <linux/extcon.h>
 #include <linux/regmap.h>
@@ -71,12 +70,6 @@
 #define DET_STAT_CDP	2
 #define DET_STAT_DCP	3

-/* IRQ enable-1 register */
-#define PWRSRC_IRQ_CFG_MASK	(BIT(4)|BIT(3)|BIT(2))
-
-/* IRQ enable-6 register */
-#define BC12_IRQ_CFG_MASK	BIT(1)
-
 enum axp288_extcon_reg {
 	AXP288_PS_STAT_REG		= 0x00,
 	AXP288_PS_BOOT_REASON_REG	= 0x02,
@@ -84,8 +77,6 @@ enum axp288_extcon_reg {
 	AXP288_BC_VBUS_CNTL_REG		= 0x2d,
 	AXP288_BC_USB_STAT_REG		= 0x2e,
 	AXP288_BC_DET_STAT_REG		= 0x2f,
-	AXP288_PWRSRC_IRQ_CFG_REG	= 0x40,
-	AXP288_BC12_IRQ_CFG_REG		= 0x45,
 };

 enum axp288_mux_select {
@@ -105,6 +96,7 @@ static const unsigned int axp288_extcon_cables[] = {
 	EXTCON_CHG_USB_SDP,
 	EXTCON_CHG_USB_CDP,
 	EXTCON_CHG_USB_DCP,
+	EXTCON_USB,
 	EXTCON_NONE,
 };

@@ -112,11 +104,11 @@ struct axp288_extcon_info {
 	struct device *dev;
 	struct regmap *regmap;
 	struct regmap_irq_chip_data *regmap_irqc;
-	struct axp288_extcon_pdata *pdata;
+	struct gpio_desc *gpio_mux_cntl;
 	int irq[EXTCON_IRQ_END];
 	struct extcon_dev *edev;
 	struct notifier_block extcon_nb;
-	struct usb_phy *otg;
+	unsigned int previous_cable;
 };

 /* Power up/down reason string array */
@@ -156,10 +148,9 @@ static void axp288_extcon_log_rsi(struct axp288_extcon_info *info)

 static int axp288_handle_chrg_det_event(struct axp288_extcon_info *info)
 {
-	static bool notify_otg, notify_charger;
-	static unsigned int cable;
 	int ret, stat, cfg, pwr_stat;
 	u8 chrg_type;
+	unsigned int cable = info->previous_cable;
 	bool vbus_attach = false;

 	ret = regmap_read(info->regmap, AXP288_PS_STAT_REG, &pwr_stat);
@@ -168,9 +159,9 @@ static int axp288_handle_chrg_det_event(struct axp288_extcon_info *info)
 		return ret;
 	}

-	vbus_attach = (pwr_stat & PS_STAT_VBUS_PRESENT);
+	vbus_attach = (pwr_stat & PS_STAT_VBUS_VALID);
 	if (!vbus_attach)
-		goto notify_otg;
+		goto no_vbus;

 	/* Check charger detection completion status */
 	ret = regmap_read(info->regmap, AXP288_BC_GLOBAL_REG, &cfg);
@@ -190,19 +181,14 @@ static int axp288_handle_chrg_det_event(struct axp288_extcon_info *info)
 	switch (chrg_type) {
 	case DET_STAT_SDP:
 		dev_dbg(info->dev, "sdp cable is connected\n");
-		notify_otg = true;
-		notify_charger = true;
 		cable = EXTCON_CHG_USB_SDP;
 		break;
 	case DET_STAT_CDP:
 		dev_dbg(info->dev, "cdp cable is connected\n");
-		notify_otg = true;
-		notify_charger = true;
 		cable = EXTCON_CHG_USB_CDP;
 		break;
 	case DET_STAT_DCP:
 		dev_dbg(info->dev, "dcp cable is connected\n");
-		notify_charger = true;
 		cable = EXTCON_CHG_USB_DCP;
 		break;
 	default:
@@ -210,27 +196,28 @@ static int axp288_handle_chrg_det_event(struct axp288_extcon_info *info)
 			"disconnect or unknown or ID event\n");
 	}

-notify_otg:
-	if (notify_otg) {
-		/*
-		 * If VBUS is absent Connect D+/D- lines to PMIC for BC
-		 * detection. Else connect them to SOC for USB communication.
-		 */
-		if (info->pdata->gpio_mux_cntl)
-			gpiod_set_value(info->pdata->gpio_mux_cntl,
-				vbus_attach ? EXTCON_GPIO_MUX_SEL_SOC
-						: EXTCON_GPIO_MUX_SEL_PMIC);
-
-		atomic_notifier_call_chain(&info->otg->notifier,
-			vbus_attach ? USB_EVENT_VBUS : USB_EVENT_NONE, NULL);
-	}
+no_vbus:
+	/*
+	 * If VBUS is absent Connect D+/D- lines to PMIC for BC
+	 * detection. Else connect them to SOC for USB communication.
+	 */
+	if (info->gpio_mux_cntl)
+		gpiod_set_value(info->gpio_mux_cntl,
+				vbus_attach ? EXTCON_GPIO_MUX_SEL_SOC
					    : EXTCON_GPIO_MUX_SEL_PMIC);
+
+	extcon_set_state_sync(info->edev, info->previous_cable, false);
+	if (info->previous_cable == EXTCON_CHG_USB_SDP)
+		extcon_set_state_sync(info->edev, EXTCON_USB, false);

-	if (notify_charger)
+	if (vbus_attach) {
 		extcon_set_state_sync(info->edev, cable, vbus_attach);
+		if (cable == EXTCON_CHG_USB_SDP)
+			extcon_set_state_sync(info->edev, EXTCON_USB,
+					      vbus_attach);

-	/* Clear the flags on disconnect event */
-	if (!vbus_attach)
-		notify_otg = notify_charger = false;
+		info->previous_cable = cable;
+	}

 	return 0;

@@ -253,15 +240,10 @@ static irqreturn_t axp288_extcon_isr(int irq, void *data)
 	return IRQ_HANDLED;
 }

-static void axp288_extcon_enable_irq(struct axp288_extcon_info *info)
+static void axp288_extcon_enable(struct axp288_extcon_info *info)
 {
-	/* Unmask VBUS interrupt */
-	regmap_write(info->regmap, AXP288_PWRSRC_IRQ_CFG_REG,
-		     PWRSRC_IRQ_CFG_MASK);
 	regmap_update_bits(info->regmap, AXP288_BC_GLOBAL_REG,
 			   BC_GLOBAL_RUN, 0);
-	/* Unmask the BC1.2 complete interrupts */
-	regmap_write(info->regmap, AXP288_BC12_IRQ_CFG_REG, BC12_IRQ_CFG_MASK);
 	/* Enable the charger detection logic */
 	regmap_update_bits(info->regmap, AXP288_BC_GLOBAL_REG,
 			   BC_GLOBAL_RUN, BC_GLOBAL_RUN);
@@ -271,6 +253,7 @@ static int axp288_extcon_probe(struct platform_device *pdev)
 {
 	struct axp288_extcon_info *info;
 	struct axp20x_dev *axp20x = dev_get_drvdata(pdev->dev.parent);
+	struct axp288_extcon_pdata *pdata = pdev->dev.platform_data;
 	int ret, i, pirq, gpio;

 	info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
@@ -280,15 +263,10 @@ static int axp288_extcon_probe(struct platform_device *pdev)
 	info->dev = &pdev->dev;
 	info->regmap = axp20x->regmap;
 	info->regmap_irqc = axp20x->regmap_irqc;
-	info->pdata = pdev->dev.platform_data;
+	info->previous_cable = EXTCON_NONE;
+	if (pdata)
+		info->gpio_mux_cntl = pdata->gpio_mux_cntl;

-	if (!info->pdata) {
-		/* Try ACPI provided pdata via device properties */
-		if (!device_property_present(&pdev->dev,
-					"axp288_extcon_data\n"))
-			dev_err(&pdev->dev, "failed to get platform data\n");
-		return -ENODEV;
-	}
 	platform_set_drvdata(pdev, info);

 	axp288_extcon_log_rsi(info);
@@ -308,23 +286,16 @@ static int axp288_extcon_probe(struct platform_device *pdev)
 		return ret;
 	}

-	/* Get otg transceiver phy */
-	info->otg = devm_usb_get_phy(&pdev->dev, USB_PHY_TYPE_USB2);
-	if (IS_ERR(info->otg)) {
-		dev_err(&pdev->dev, "failed to get otg transceiver\n");
-		return PTR_ERR(info->otg);
-	}
-
 	/* Set up gpio control for USB Mux */
-	if (info->pdata->gpio_mux_cntl) {
-		gpio = desc_to_gpio(info->pdata->gpio_mux_cntl);
+	if (info->gpio_mux_cntl) {
+		gpio = desc_to_gpio(info->gpio_mux_cntl);
 		ret = devm_gpio_request(&pdev->dev, gpio, "USB_MUX");
 		if (ret < 0) {
 			dev_err(&pdev->dev,
 				"failed to request the gpio=%d\n", gpio);
 			return ret;
 		}
-		gpiod_direction_output(info->pdata->gpio_mux_cntl,
+		gpiod_direction_output(info->gpio_mux_cntl,
 				       EXTCON_GPIO_MUX_SEL_PMIC);
 	}

@@ -349,14 +320,21 @@ static int axp288_extcon_probe(struct platform_device *pdev)
 		}
 	}

-	/* Enable interrupts */
-	axp288_extcon_enable_irq(info);
+	/* Start charger cable type detection */
+	axp288_extcon_enable(info);

 	return 0;
 }

+static const struct platform_device_id axp288_extcon_table[] = {
+	{ .name = "axp288_extcon" },
+	{},
+};
+MODULE_DEVICE_TABLE(platform, axp288_extcon_table);
+
 static struct platform_driver axp288_extcon_driver = {
 	.probe = axp288_extcon_probe,
+	.id_table = axp288_extcon_table,
 	.driver = {
 		.name = "axp288_extcon",
 	},
drivers/extcon/extcon-intel-int3496.c (new file, 179 lines)
@@ -0,0 +1,179 @@
|
||||
/*
|
||||
* Intel INT3496 ACPI device extcon driver
|
||||
*
|
||||
* Copyright (c) 2016 Hans de Goede <hdegoede@redhat.com>
|
||||
*
|
||||
* Based on android x86 kernel code which is:
|
||||
*
|
||||
* Copyright (c) 2014, Intel Corporation.
|
||||
* Author: David Cohen <david.a.cohen@linux.intel.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*/
|
||||
|
||||
#include <linux/acpi.h>
|
||||
#include <linux/extcon.h>
|
||||
#include <linux/gpio.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/platform_device.h>
|
||||
|
||||
#define INT3496_GPIO_USB_ID 0
|
||||
#define INT3496_GPIO_VBUS_EN 1
|
||||
#define INT3496_GPIO_USB_MUX 2
|
||||
#define DEBOUNCE_TIME msecs_to_jiffies(50)
|
||||
|
||||
struct int3496_data {
|
||||
struct device *dev;
|
||||
struct extcon_dev *edev;
|
||||
struct delayed_work work;
|
||||
struct gpio_desc *gpio_usb_id;
|
||||
struct gpio_desc *gpio_vbus_en;
|
||||
struct gpio_desc *gpio_usb_mux;
|
||||
int usb_id_irq;
|
||||
};
|
||||
|
||||
static const unsigned int int3496_cable[] = {
|
||||
EXTCON_USB_HOST,
|
||||
EXTCON_NONE,
|
||||
};
|
||||
|
||||
static void int3496_do_usb_id(struct work_struct *work)
|
||||
{
|
||||
struct int3496_data *data =
|
||||
container_of(work, struct int3496_data, work.work);
|
||||
int id = gpiod_get_value_cansleep(data->gpio_usb_id);
|
||||
|
||||
/* id == 1: PERIPHERAL, id == 0: HOST */
|
||||
dev_dbg(data->dev, "Connected %s cable\n", id ? "PERIPHERAL" : "HOST");
|
||||
|
||||
/*
|
||||
* Peripheral: set USB mux to peripheral and disable VBUS
|
||||
* Host: set USB mux to host and enable VBUS
|
||||
*/
|
||||
if (!IS_ERR(data->gpio_usb_mux))
|
||||
gpiod_direction_output(data->gpio_usb_mux, id);
|
||||
|
||||
if (!IS_ERR(data->gpio_vbus_en))
|
||||
gpiod_direction_output(data->gpio_vbus_en, !id);
|
||||
|
||||
extcon_set_state_sync(data->edev, EXTCON_USB_HOST, !id);
|
||||
}
|
||||
|
||||
static irqreturn_t int3496_thread_isr(int irq, void *priv)
|
||||
{
|
||||
struct int3496_data *data = priv;
|
||||
|
||||
/* Let the pin settle before processing it */
|
||||
mod_delayed_work(system_wq, &data->work, DEBOUNCE_TIME);
|
||||
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
static int int3496_probe(struct platform_device *pdev)
|
||||
{
|
||||
struct device *dev = &pdev->dev;
|
||||
struct int3496_data *data;
|
||||
int ret;
|
||||
|
||||
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
|
||||
if (!data)
|
||||
return -ENOMEM;
|
||||
|
||||
data->dev = dev;
|
||||
INIT_DELAYED_WORK(&data->work, int3496_do_usb_id);
|
||||
|
||||
data->gpio_usb_id = devm_gpiod_get_index(dev, "id",
|
||||
INT3496_GPIO_USB_ID,
|
||||
GPIOD_IN);
|
||||
if (IS_ERR(data->gpio_usb_id)) {
|
||||
ret = PTR_ERR(data->gpio_usb_id);
|
||||
dev_err(dev, "can't request USB ID GPIO: %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
data->usb_id_irq = gpiod_to_irq(data->gpio_usb_id);
|
||||
if (data->usb_id_irq <= 0) {
|
||||
dev_err(dev, "can't get USB ID IRQ: %d\n", data->usb_id_irq);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
data->gpio_vbus_en = devm_gpiod_get_index(dev, "vbus en",
|
||||
INT3496_GPIO_VBUS_EN,
|
||||
GPIOD_ASIS);
|
||||
if (IS_ERR(data->gpio_vbus_en))
|
||||
dev_info(dev, "can't request VBUS EN GPIO\n");
|
||||
|
||||
data->gpio_usb_mux = devm_gpiod_get_index(dev, "usb mux",
|
||||
INT3496_GPIO_USB_MUX,
|
||||
GPIOD_ASIS);
|
||||
if (IS_ERR(data->gpio_usb_mux))
|
||||
dev_info(dev, "can't request USB MUX GPIO\n");
|
||||
|
||||
/* register extcon device */
|
||||
data->edev = devm_extcon_dev_allocate(dev, int3496_cable);
|
||||
if (IS_ERR(data->edev))
|
||||
return -ENOMEM;
|
||||
|
||||
ret = devm_extcon_dev_register(dev, data->edev);
|
||||
if (ret < 0) {
|
||||
dev_err(dev, "can't register extcon device: %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = devm_request_threaded_irq(dev, data->usb_id_irq,
|
||||
NULL, int3496_thread_isr,
|
||||
IRQF_SHARED | IRQF_ONESHOT |
|
||||
IRQF_TRIGGER_RISING |
|
||||
IRQF_TRIGGER_FALLING,
|
||||
dev_name(dev), data);
|
||||
if (ret < 0) {
|
||||
dev_err(dev, "can't request IRQ for USB ID GPIO: %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* queue initial processing of id-pin */
|
||||
queue_delayed_work(system_wq, &data->work, 0);
|
||||
|
||||
platform_set_drvdata(pdev, data);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int int3496_remove(struct platform_device *pdev)
|
||||
{
|
||||
struct int3496_data *data = platform_get_drvdata(pdev);
|
||||
|
||||
devm_free_irq(&pdev->dev, data->usb_id_irq, data);
|
||||
cancel_delayed_work_sync(&data->work);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct acpi_device_id int3496_acpi_match[] = {
|
||||
{ "INT3496" },
|
||||
{ }
|
||||
};
|
||||
MODULE_DEVICE_TABLE(acpi, int3496_acpi_match);
|
||||
|
||||
static struct platform_driver int3496_driver = {
|
||||
.driver = {
|
||||
.name = "intel-int3496",
|
||||
.acpi_match_table = int3496_acpi_match,
|
||||
},
|
||||
.probe = int3496_probe,
|
||||
.remove = int3496_remove,
|
||||
};
|
||||
|
||||
module_platform_driver(int3496_driver);
|
||||
|
||||
MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
|
||||
MODULE_DESCRIPTION("Intel INT3496 ACPI device extcon driver");
|
||||
MODULE_LICENSE("GPL");
|
@@ -531,8 +531,10 @@ static int max14577_parse_irq(struct max14577_muic_info *info, int irq_type)
 	case MAX14577_IRQ_INT1_ADC:
 	case MAX14577_IRQ_INT1_ADCLOW:
 	case MAX14577_IRQ_INT1_ADCERR:
-		/* Handle all of accessory except for
-		   type of charger accessory */
+		/*
+		 * Handle all of accessory except for
+		 * type of charger accessory.
+		 */
 		info->irq_adc = true;
 		return 1;
 	case MAX14577_IRQ_INT2_CHGTYP:
@@ -188,8 +188,10 @@ enum max77693_muic_acc_type {
 	MAX77693_MUIC_ADC_AUDIO_MODE_REMOTE,
 	MAX77693_MUIC_ADC_OPEN,

-	/* The below accessories have same ADC value so ADCLow and
-	   ADC1K bit is used to separate specific accessory */
+	/*
+	 * The below accessories have same ADC value so ADCLow and
+	 * ADC1K bit is used to separate specific accessory.
+	 */
 						/* ADC|VBVolot|ADCLow|ADC1K| */
 	MAX77693_MUIC_GND_USB_HOST = 0x100,	/* 0x0|      0|     0|    0| */
 	MAX77693_MUIC_GND_USB_HOST_VB = 0x104,	/* 0x0|      1|     0|    0| */
@@ -970,8 +972,10 @@ static void max77693_muic_irq_work(struct work_struct *work)
 	case MAX77693_MUIC_IRQ_INT1_ADC_LOW:
 	case MAX77693_MUIC_IRQ_INT1_ADC_ERR:
 	case MAX77693_MUIC_IRQ_INT1_ADC1K:
-		/* Handle all of accessory except for
-		   type of charger accessory */
+		/*
+		 * Handle all of accessory except for
+		 * type of charger accessory.
+		 */
 		ret = max77693_muic_adc_handler(info);
 		break;
 	case MAX77693_MUIC_IRQ_INT2_CHGTYP:
@@ -97,8 +97,10 @@ enum max77843_muic_accessory_type {
 	MAX77843_MUIC_ADC_AUDIO_DEVICE_TYPE1,
 	MAX77843_MUIC_ADC_OPEN,

-	/* The blow accessories should check
-	   not only ADC value but also ADC1K and VBVolt value. */
+	/*
+	 * The below accessories should check
+	 * not only ADC value but also ADC1K and VBVolt value.
+	 */
 						/* Offset|ADC1K|VBVolt| */
 	MAX77843_MUIC_GND_USB_HOST = 0x100,	/*    0x1|    0|     0| */
 	MAX77843_MUIC_GND_USB_HOST_VB = 0x101,	/*    0x1|    0|     1| */
@@ -265,16 +267,20 @@ static int max77843_muic_get_cable_type(struct max77843_muic_info *info,
 	/* Check GROUND accessory with charger cable */
 	if (adc == MAX77843_MUIC_ADC_GROUND) {
 		if (chg_type == MAX77843_MUIC_CHG_NONE) {
-			/* The following state when charger cable is
+			/*
+			 * The following state when charger cable is
 			 * disconnected but the GROUND accessory still
-			 * connected */
+			 * connected.
+			 */
 			*attached = false;
 			cable_type = info->prev_chg_type;
 			info->prev_chg_type = MAX77843_MUIC_CHG_NONE;
 		} else {
-
-			/* The following state when charger cable is
-			 * connected on the GROUND accessory */
+			/*
+			 * The following state when charger cable is
+			 * connected on the GROUND accessory.
+			 */
 			*attached = true;
 			cable_type = MAX77843_MUIC_CHG_GND;
 			info->prev_chg_type = MAX77843_MUIC_CHG_GND;
@@ -299,11 +305,13 @@ static int max77843_muic_get_cable_type(struct max77843_muic_info *info,
 	} else {
 		*attached = true;

-		/* Offset|ADC1K|VBVolt|
+		/*
+		 * Offset|ADC1K|VBVolt|
 		 * 0x1|    0|     0| USB-HOST
 		 * 0x1|    0|     1| USB-HOST with VB
 		 * 0x1|    1|     0| MHL
-		 * 0x1|    1|     1| MHL with VB */
+		 * 0x1|    1|     1| MHL with VB
+		 */
 		/* Get ADC1K register bit */
 		gnd_type = (info->status[MAX77843_MUIC_STATUS1] &
 			    MAX77843_MUIC_STATUS1_ADC1K_MASK);
@@ -62,7 +62,7 @@ static irqreturn_t palmas_vbus_irq_handler(int irq, void *_palmas_usb)
 	if (palmas_usb->linkstat != PALMAS_USB_STATE_VBUS) {
 		palmas_usb->linkstat = PALMAS_USB_STATE_VBUS;
 		extcon_set_state_sync(edev, EXTCON_USB, true);
-		dev_info(palmas_usb->dev, "USB cable is attached\n");
+		dev_dbg(palmas_usb->dev, "USB cable is attached\n");
 	} else {
 		dev_dbg(palmas_usb->dev,
 			"Spurious connect event detected\n");
@@ -71,7 +71,7 @@ static irqreturn_t palmas_vbus_irq_handler(int irq, void *_palmas_usb)
 	if (palmas_usb->linkstat == PALMAS_USB_STATE_VBUS) {
 		palmas_usb->linkstat = PALMAS_USB_STATE_DISCONNECT;
 		extcon_set_state_sync(edev, EXTCON_USB, false);
-		dev_info(palmas_usb->dev, "USB cable is detached\n");
+		dev_dbg(palmas_usb->dev, "USB cable is detached\n");
 	} else {
 		dev_dbg(palmas_usb->dev,
 			"Spurious disconnect event detected\n");
@@ -99,7 +99,7 @@ static irqreturn_t palmas_id_irq_handler(int irq, void *_palmas_usb)
 			     PALMAS_USB_ID_INT_EN_HI_CLR_ID_GND);
 		palmas_usb->linkstat = PALMAS_USB_STATE_ID;
 		extcon_set_state_sync(edev, EXTCON_USB_HOST, true);
-		dev_info(palmas_usb->dev, "USB-HOST cable is attached\n");
+		dev_dbg(palmas_usb->dev, "USB-HOST cable is attached\n");
 	} else if ((set & PALMAS_USB_ID_INT_SRC_ID_FLOAT) &&
 		   (id_src & PALMAS_USB_ID_INT_SRC_ID_FLOAT)) {
 		palmas_write(palmas_usb->palmas, PALMAS_USB_OTG_BASE,
@@ -107,17 +107,17 @@ static irqreturn_t palmas_id_irq_handler(int irq, void *_palmas_usb)
 			     PALMAS_USB_ID_INT_EN_HI_CLR_ID_FLOAT);
 		palmas_usb->linkstat = PALMAS_USB_STATE_DISCONNECT;
 		extcon_set_state_sync(edev, EXTCON_USB_HOST, false);
-		dev_info(palmas_usb->dev, "USB-HOST cable is detached\n");
+		dev_dbg(palmas_usb->dev, "USB-HOST cable is detached\n");
 	} else if ((palmas_usb->linkstat == PALMAS_USB_STATE_ID) &&
 		   (!(set & PALMAS_USB_ID_INT_SRC_ID_GND))) {
 		palmas_usb->linkstat = PALMAS_USB_STATE_DISCONNECT;
 		extcon_set_state_sync(edev, EXTCON_USB_HOST, false);
-		dev_info(palmas_usb->dev, "USB-HOST cable is detached\n");
+		dev_dbg(palmas_usb->dev, "USB-HOST cable is detached\n");
 	} else if ((palmas_usb->linkstat == PALMAS_USB_STATE_DISCONNECT) &&
 		   (id_src & PALMAS_USB_ID_INT_SRC_ID_GND)) {
 		palmas_usb->linkstat = PALMAS_USB_STATE_ID;
 		extcon_set_state_sync(edev, EXTCON_USB_HOST, true);
-		dev_info(palmas_usb->dev, " USB-HOST cable is attached\n");
+		dev_dbg(palmas_usb->dev, " USB-HOST cable is attached\n");
 	}

 	return IRQ_HANDLED;
@@ -138,10 +138,10 @@ static void palmas_gpio_id_detect(struct work_struct *work)

 	if (id) {
 		extcon_set_state_sync(edev, EXTCON_USB_HOST, false);
-		dev_info(palmas_usb->dev, "USB-HOST cable is detached\n");
+		dev_dbg(palmas_usb->dev, "USB-HOST cable is detached\n");
 	} else {
 		extcon_set_state_sync(edev, EXTCON_USB_HOST, true);
-		dev_info(palmas_usb->dev, "USB-HOST cable is attached\n");
+		dev_dbg(palmas_usb->dev, "USB-HOST cable is attached\n");
 	}
 }

@@ -190,6 +190,11 @@ static int palmas_usb_probe(struct platform_device *pdev)
 	struct palmas_usb *palmas_usb;
 	int status;

+	if (!palmas) {
+		dev_err(&pdev->dev, "failed to get valid parent\n");
+		return -EINVAL;
+	}
+
 	palmas_usb = devm_kzalloc(&pdev->dev, sizeof(*palmas_usb), GFP_KERNEL);
 	if (!palmas_usb)
 		return -ENOMEM;
@@ -142,8 +142,10 @@ enum rt8973a_muic_acc_type {
 	RT8973A_MUIC_ADC_UNKNOWN_ACC_5,
 	RT8973A_MUIC_ADC_OPEN = 0x1f,

-	/* The below accessories has same ADC value (0x1f).
-	   So, Device type1 is used to separate specific accessory. */
+	/*
+	 * The below accessories has same ADC value (0x1f).
+	 * So, Device type1 is used to separate specific accessory.
+	 */
 						/* |---------|--ADC| */
 						/* |    [7:5]|[4:0]| */
 	RT8973A_MUIC_ADC_USB = 0x3f,		/* |      001|11111| */
@@ -135,8 +135,10 @@ enum sm5502_muic_acc_type {
 	SM5502_MUIC_ADC_AUDIO_TYPE1,
 	SM5502_MUIC_ADC_OPEN = 0x1f,

-	/* The below accessories have same ADC value (0x1f or 0x1e).
-	   So, Device type1 is used to separate specific accessory. */
+	/*
+	 * The below accessories have same ADC value (0x1f or 0x1e).
+	 * So, Device type1 is used to separate specific accessory.
+	 */
 						/* |---------|--ADC| */
 						/* |    [7:5]|[4:0]| */
 	SM5502_MUIC_ADC_AUDIO_TYPE1_FULL_REMOTE = 0x3e,	/* |      001|11110| */
@@ -27,6 +27,7 @@
 #include <linux/slab.h>
 #include <linux/workqueue.h>
 #include <linux/acpi.h>
+#include <linux/pinctrl/consumer.h>

 #define USB_GPIO_DEBOUNCE_MS	20	/* ms */

@@ -245,6 +246,9 @@ static int usb_extcon_suspend(struct device *dev)
 	if (info->vbus_gpiod)
 		disable_irq(info->vbus_irq);

+	if (!device_may_wakeup(dev))
+		pinctrl_pm_select_sleep_state(dev);
+
 	return ret;
 }

@@ -253,6 +257,9 @@ static int usb_extcon_resume(struct device *dev)
 	struct usb_extcon_info *info = dev_get_drvdata(dev);
 	int ret = 0;

+	if (!device_may_wakeup(dev))
+		pinctrl_pm_select_default_state(dev);
+
 	if (device_may_wakeup(dev)) {
 		if (info->id_gpiod) {
 			ret = disable_irq_wake(info->id_irq);
@@ -30,11 +30,12 @@
 #include <linux/device.h>
 #include <linux/fs.h>
 #include <linux/err.h>
-#include <linux/extcon.h>
 #include <linux/of.h>
 #include <linux/slab.h>
 #include <linux/sysfs.h>

+#include "extcon.h"
+
 #define SUPPORTED_CABLE_MAX	32
 #define CABLE_NAME_MAX	30

@@ -59,7 +60,7 @@ struct __extcon_info {
 	[EXTCON_USB_HOST] = {
 		.type = EXTCON_TYPE_USB,
 		.id = EXTCON_USB_HOST,
-		.name = "USB_HOST",
+		.name = "USB-HOST",
 	},

 	/* Charging external connector */
@@ -98,6 +99,11 @@ struct __extcon_info {
 		.id = EXTCON_CHG_WPT,
 		.name = "WPT",
 	},
+	[EXTCON_CHG_USB_PD] = {
+		.type = EXTCON_TYPE_CHG | EXTCON_TYPE_USB,
+		.id = EXTCON_CHG_USB_PD,
+		.name = "PD",
+	},

 	/* Jack external connector */
 	[EXTCON_JACK_MICROPHONE] = {
@@ -906,35 +912,16 @@ int extcon_register_notifier(struct extcon_dev *edev, unsigned int id,
 	unsigned long flags;
 	int ret, idx = -EINVAL;

-	if (!nb)
+	if (!edev || !nb)
 		return -EINVAL;

-	if (edev) {
-		idx = find_cable_index_by_id(edev, id);
-		if (idx < 0)
-			return idx;
+	idx = find_cable_index_by_id(edev, id);
+	if (idx < 0)
+		return idx;

-		spin_lock_irqsave(&edev->lock, flags);
-		ret = raw_notifier_chain_register(&edev->nh[idx], nb);
-		spin_unlock_irqrestore(&edev->lock, flags);
-	} else {
-		struct extcon_dev *extd;
-
-		mutex_lock(&extcon_dev_list_lock);
-		list_for_each_entry(extd, &extcon_dev_list, entry) {
-			idx = find_cable_index_by_id(extd, id);
-			if (idx >= 0)
-				break;
-		}
-		mutex_unlock(&extcon_dev_list_lock);
-
-		if (idx >= 0) {
-			edev = extd;
-			return extcon_register_notifier(extd, id, nb);
-		} else {
-			ret = -ENODEV;
-		}
-	}
+	spin_lock_irqsave(&edev->lock, flags);
+	ret = raw_notifier_chain_register(&edev->nh[idx], nb);
+	spin_unlock_irqrestore(&edev->lock, flags);

 	return ret;
 }
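With the device-lookup fallback removed, callers must now pass a real
extcon_dev. A sketch of the consumer side (a minimal kernel-style snippet;
how the edev is obtained depends on the platform and is assumed here):

	static int usb_evt(struct notifier_block *nb, unsigned long state,
			   void *ptr)
	{
		pr_info("EXTCON_USB %s\n", state ? "attached" : "detached");
		return NOTIFY_OK;
	}

	static struct notifier_block usb_nb = { .notifier_call = usb_evt };

	/* edev obtained e.g. via extcon_get_edev_by_phandle(dev, 0) */
	ret = extcon_register_notifier(edev, EXTCON_USB, &usb_nb);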
drivers/extcon/extcon.h (new file, 62 lines)
@@ -0,0 +1,62 @@
|
||||
#ifndef __LINUX_EXTCON_INTERNAL_H__
|
||||
#define __LINUX_EXTCON_INTERNAL_H__
|
||||
|
||||
#include <linux/extcon.h>
|
||||
|
||||
/**
|
||||
* struct extcon_dev - An extcon device represents one external connector.
|
||||
* @name: The name of this extcon device. Parent device name is
|
||||
* used if NULL.
|
||||
* @supported_cable: Array of supported cable names ending with EXTCON_NONE.
|
||||
* If supported_cable is NULL, cable name related APIs
|
||||
* are disabled.
|
||||
* @mutually_exclusive: Array of mutually exclusive set of cables that cannot
|
||||
* be attached simultaneously. The array should be
|
||||
* ending with NULL or be NULL (no mutually exclusive
|
||||
* cables). For example, if it is { 0x7, 0x30, 0}, then,
|
||||
* {0, 1}, {0, 1, 2}, {0, 2}, {1, 2}, or {4, 5} cannot
|
||||
* be attached simulataneously. {0x7, 0} is equivalent to
|
||||
* {0x3, 0x6, 0x5, 0}. If it is {0xFFFFFFFF, 0}, there
|
||||
* can be no simultaneous connections.
|
||||
* @dev: Device of this extcon.
|
||||
* @state: Attach/detach state of this extcon. Do not provide at
|
||||
* register-time.
|
||||
* @nh: Notifier for the state change events from this extcon
|
||||
* @entry: To support list of extcon devices so that users can
|
||||
* search for extcon devices based on the extcon name.
|
||||
* @lock:
|
||||
* @max_supported: Internal value to store the number of cables.
|
||||
* @extcon_dev_type: Device_type struct to provide attribute_groups
|
||||
* customized for each extcon device.
|
||||
* @cables: Sysfs subdirectories. Each represents one cable.
|
||||
*
|
||||
* In most cases, users only need to provide "User initializing data" of
|
||||
* this struct when registering an extcon. In some exceptional cases,
|
||||
* optional callbacks may be needed. However, the values in "internal data"
|
||||
* are overwritten by register function.
|
||||
*/
|
||||
struct extcon_dev {
|
||||
/* Optional user initializing data */
|
||||
const char *name;
|
||||
const unsigned int *supported_cable;
|
||||
const u32 *mutually_exclusive;
|
||||
|
||||
/* Internal data. Please do not set. */
|
||||
struct device dev;
|
||||
struct raw_notifier_head *nh;
|
||||
struct list_head entry;
|
||||
int max_supported;
|
||||
spinlock_t lock; /* could be called by irq handler */
|
||||
u32 state;
|
||||
|
||||
/* /sys/class/extcon/.../cable.n/... */
|
||||
struct device_type extcon_dev_type;
|
||||
struct extcon_cable *cables;
|
||||
|
||||
/* /sys/class/extcon/.../mutually_exclusive/... */
|
||||
struct attribute_group attr_g_muex;
|
||||
struct attribute **attrs_muex;
|
||||
struct device_attribute *d_attrs_muex;
|
||||
};
|
||||
|
||||
#endif /* __LINUX_EXTCON_INTERNAL_H__ */
|
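To make the mutually_exclusive encoding in the kernel-doc above concrete, here is a hypothetical provider-side sketch: each bit position indexes into the provider's supported_cable array, and the array is zero-terminated.

	/*
	 * Hypothetical example: if EXTCON_USB and EXTCON_USB_HOST sit at
	 * positions 0 and 1 of supported_cable, 0x3 (bits 0 and 1 set)
	 * declares that the two can never be attached at the same time.
	 */
	static const u32 my_exclusive[] = { 0x3, 0 };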
@@ -25,16 +25,106 @@
 #include <linux/of.h>
 #include <linux/mutex.h>
 #include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>

static DEFINE_IDA(fpga_mgr_ida);
static struct class *fpga_mgr_class;

/*
 * Call the low level driver's write_init function. This will do the
 * device-specific things to get the FPGA into the state where it is ready to
 * receive an FPGA image. The low level driver only gets to see the first
 * initial_header_size bytes in the buffer.
 */
static int fpga_mgr_write_init_buf(struct fpga_manager *mgr,
				   struct fpga_image_info *info,
				   const char *buf, size_t count)
{
	int ret;

	mgr->state = FPGA_MGR_STATE_WRITE_INIT;
	if (!mgr->mops->initial_header_size)
		ret = mgr->mops->write_init(mgr, info, NULL, 0);
	else
		ret = mgr->mops->write_init(
		    mgr, info, buf, min(mgr->mops->initial_header_size, count));

	if (ret) {
		dev_err(&mgr->dev, "Error preparing FPGA for writing\n");
		mgr->state = FPGA_MGR_STATE_WRITE_INIT_ERR;
		return ret;
	}

	return 0;
}

static int fpga_mgr_write_init_sg(struct fpga_manager *mgr,
				  struct fpga_image_info *info,
				  struct sg_table *sgt)
{
	struct sg_mapping_iter miter;
	size_t len;
	char *buf;
	int ret;

	if (!mgr->mops->initial_header_size)
		return fpga_mgr_write_init_buf(mgr, info, NULL, 0);

	/*
	 * First try to use miter to map the first fragment to access the
	 * header, this is the typical path.
	 */
	sg_miter_start(&miter, sgt->sgl, sgt->nents, SG_MITER_FROM_SG);
	if (sg_miter_next(&miter) &&
	    miter.length >= mgr->mops->initial_header_size) {
		ret = fpga_mgr_write_init_buf(mgr, info, miter.addr,
					      miter.length);
		sg_miter_stop(&miter);
		return ret;
	}
	sg_miter_stop(&miter);

	/* Otherwise copy the fragments into temporary memory. */
	buf = kmalloc(mgr->mops->initial_header_size, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	len = sg_copy_to_buffer(sgt->sgl, sgt->nents, buf,
				mgr->mops->initial_header_size);
	ret = fpga_mgr_write_init_buf(mgr, info, buf, len);

	kfree(buf);

	return ret;
}

/*
 * After all the FPGA image has been written, do the device specific steps to
 * finish and set the FPGA into operating mode.
 */
static int fpga_mgr_write_complete(struct fpga_manager *mgr,
				   struct fpga_image_info *info)
{
	int ret;

	mgr->state = FPGA_MGR_STATE_WRITE_COMPLETE;
	ret = mgr->mops->write_complete(mgr, info);
	if (ret) {
		dev_err(&mgr->dev, "Error after writing image data to FPGA\n");
		mgr->state = FPGA_MGR_STATE_WRITE_COMPLETE_ERR;
		return ret;
	}
	mgr->state = FPGA_MGR_STATE_OPERATING;

	return 0;
}

/**
 * fpga_mgr_buf_load - load fpga from image in buffer
 * fpga_mgr_buf_load_sg - load fpga from image in buffer from a scatter list
 * @mgr:	fpga manager
 * @info:	fpga image specific information
 * @buf:	buffer contain fpga image
 * @count:	byte count of buf
 * @sgt:	scatterlist table
 *
 * Step the low level fpga manager through the device-specific steps of getting
 * an FPGA ready to be configured, writing the image to it, then doing whatever
@@ -42,54 +132,139 @@ static struct class *fpga_mgr_class;
 * mgr pointer from of_fpga_mgr_get() or fpga_mgr_get() and checked that it is
 * not an error code.
 *
 * This is the preferred entry point for FPGA programming, it does not require
 * any contiguous kernel memory.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int fpga_mgr_buf_load(struct fpga_manager *mgr, struct fpga_image_info *info,
		      const char *buf, size_t count)
int fpga_mgr_buf_load_sg(struct fpga_manager *mgr, struct fpga_image_info *info,
			 struct sg_table *sgt)
{
	struct device *dev = &mgr->dev;
	int ret;

	/*
	 * Call the low level driver's write_init function. This will do the
	 * device-specific things to get the FPGA into the state where it is
	 * ready to receive an FPGA image. The low level driver only gets to
	 * see the first initial_header_size bytes in the buffer.
	 */
	mgr->state = FPGA_MGR_STATE_WRITE_INIT;
	ret = mgr->mops->write_init(mgr, info, buf,
				    min(mgr->mops->initial_header_size, count));
	ret = fpga_mgr_write_init_sg(mgr, info, sgt);
	if (ret)
		return ret;

	/* Write the FPGA image to the FPGA. */
	mgr->state = FPGA_MGR_STATE_WRITE;
	if (mgr->mops->write_sg) {
		ret = mgr->mops->write_sg(mgr, sgt);
	} else {
		struct sg_mapping_iter miter;

		sg_miter_start(&miter, sgt->sgl, sgt->nents, SG_MITER_FROM_SG);
		while (sg_miter_next(&miter)) {
			ret = mgr->mops->write(mgr, miter.addr, miter.length);
			if (ret)
				break;
		}
		sg_miter_stop(&miter);
	}

	if (ret) {
		dev_err(dev, "Error preparing FPGA for writing\n");
		mgr->state = FPGA_MGR_STATE_WRITE_INIT_ERR;
		dev_err(&mgr->dev, "Error while writing image data to FPGA\n");
		mgr->state = FPGA_MGR_STATE_WRITE_ERR;
		return ret;
	}

	return fpga_mgr_write_complete(mgr, info);
}
EXPORT_SYMBOL_GPL(fpga_mgr_buf_load_sg);

static int fpga_mgr_buf_load_mapped(struct fpga_manager *mgr,
				    struct fpga_image_info *info,
				    const char *buf, size_t count)
{
	int ret;

	ret = fpga_mgr_write_init_buf(mgr, info, buf, count);
	if (ret)
		return ret;

	/*
	 * Write the FPGA image to the FPGA.
	 */
	mgr->state = FPGA_MGR_STATE_WRITE;
	ret = mgr->mops->write(mgr, buf, count);
	if (ret) {
		dev_err(dev, "Error while writing image data to FPGA\n");
		dev_err(&mgr->dev, "Error while writing image data to FPGA\n");
		mgr->state = FPGA_MGR_STATE_WRITE_ERR;
		return ret;
	}

	/*
	 * After all the FPGA image has been written, do the device specific
	 * steps to finish and set the FPGA into operating mode.
	 */
	mgr->state = FPGA_MGR_STATE_WRITE_COMPLETE;
	ret = mgr->mops->write_complete(mgr, info);
	if (ret) {
		dev_err(dev, "Error after writing image data to FPGA\n");
		mgr->state = FPGA_MGR_STATE_WRITE_COMPLETE_ERR;
		return ret;
	}
	mgr->state = FPGA_MGR_STATE_OPERATING;
	return fpga_mgr_write_complete(mgr, info);
}

	return 0;
/**
 * fpga_mgr_buf_load - load fpga from image in buffer
 * @mgr:	fpga manager
 * @flags:	flags setting fpga configuration modes
 * @buf:	buffer contain fpga image
 * @count:	byte count of buf
 *
 * Step the low level fpga manager through the device-specific steps of getting
 * an FPGA ready to be configured, writing the image to it, then doing whatever
 * post-configuration steps necessary. This code assumes the caller got the
 * mgr pointer from of_fpga_mgr_get() and checked that it is not an error code.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int fpga_mgr_buf_load(struct fpga_manager *mgr, struct fpga_image_info *info,
		      const char *buf, size_t count)
{
	struct page **pages;
	struct sg_table sgt;
	const void *p;
	int nr_pages;
	int index;
	int rc;

	/*
	 * This is just a fast path if the caller has already created a
	 * contiguous kernel buffer and the driver doesn't require SG, non-SG
	 * drivers will still work on the slow path.
	 */
	if (mgr->mops->write)
		return fpga_mgr_buf_load_mapped(mgr, info, buf, count);

	/*
	 * Convert the linear kernel pointer into a sg_table of pages for use
	 * by the driver.
	 */
	nr_pages = DIV_ROUND_UP((unsigned long)buf + count, PAGE_SIZE) -
		   (unsigned long)buf / PAGE_SIZE;
	pages = kmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	p = buf - offset_in_page(buf);
	for (index = 0; index < nr_pages; index++) {
		if (is_vmalloc_addr(p))
			pages[index] = vmalloc_to_page(p);
		else
			pages[index] = kmap_to_page((void *)p);
		if (!pages[index]) {
			kfree(pages);
			return -EFAULT;
		}
		p += PAGE_SIZE;
	}

	/*
	 * The temporary pages list is used to code share the merging algorithm
	 * in sg_alloc_table_from_pages
	 */
	rc = sg_alloc_table_from_pages(&sgt, pages, index, offset_in_page(buf),
				       count, GFP_KERNEL);
	kfree(pages);
	if (rc)
		return rc;

	rc = fpga_mgr_buf_load_sg(mgr, info, &sgt);
	sg_free_table(&sgt);

	return rc;
}
EXPORT_SYMBOL_GPL(fpga_mgr_buf_load);

@@ -291,8 +466,9 @@ int fpga_mgr_register(struct device *dev, const char *name,
	struct fpga_manager *mgr;
	int id, ret;

	if (!mops || !mops->write_init || !mops->write ||
	    !mops->write_complete || !mops->state) {
	if (!mops || !mops->write_complete || !mops->state ||
	    !mops->write_init || (!mops->write && !mops->write_sg) ||
	    (mops->write && mops->write_sg)) {
		dev_err(dev, "Attempt to register without fpga_manager_ops\n");
		return -EINVAL;
	}
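The reworked fpga_mgr_register() check above demands exactly one of .write and .write_sg. A sketch of what a low-level driver's ops table might look like under the new rule; all my_* callbacks are hypothetical stand-ins:

	#include <linux/fpga/fpga-mgr.h>

	/* Hypothetical low-level driver callbacks; bodies not shown. */
	static enum fpga_mgr_states my_state(struct fpga_manager *mgr);
	static int my_write_init(struct fpga_manager *mgr,
				 struct fpga_image_info *info,
				 const char *buf, size_t count);
	static int my_write_sg(struct fpga_manager *mgr, struct sg_table *sgt);
	static int my_write_complete(struct fpga_manager *mgr,
				     struct fpga_image_info *info);

	static const struct fpga_manager_ops my_ops = {
		.initial_header_size = 128,	/* bytes handed to write_init */
		.state = my_state,
		.write_init = my_write_init,
		.write_sg = my_write_sg,	/* either .write or .write_sg, never both */
		.write_complete = my_write_complete,
	};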
@@ -30,6 +30,7 @@
 #include <linux/pm.h>
 #include <linux/regmap.h>
 #include <linux/string.h>
#include <linux/scatterlist.h>

/* Offsets into SLCR regmap */

@@ -80,6 +81,7 @@

/* FPGA init status */
#define STATUS_DMA_Q_F			BIT(31)
#define STATUS_DMA_Q_E			BIT(30)
#define STATUS_PCFG_INIT_MASK		BIT(4)

/* Interrupt Status/Mask Register Bit definitions */
@@ -89,7 +91,7 @@
#define IXR_D_P_DONE_MASK		BIT(12)
/* FPGA programmed */
#define IXR_PCFG_DONE_MASK		BIT(2)
#define IXR_ERROR_FLAGS_MASK		0x00F0F860
#define IXR_ERROR_FLAGS_MASK		0x00F0C860
#define IXR_ALL_MASK			0xF8F7F87F

/* Miscellaneous constant values */
@@ -98,12 +100,16 @@
#define DMA_INVALID_ADDRESS		GENMASK(31, 0)
/* Used to unlock the dev */
#define UNLOCK_MASK			0x757bdf0d
/* Timeout for DMA to complete */
#define DMA_DONE_TIMEOUT		msecs_to_jiffies(1000)
/* Timeout for polling reset bits */
#define INIT_POLL_TIMEOUT		2500000
/* Delay for polling reset bits */
#define INIT_POLL_DELAY			20
/* Signal this is the last DMA transfer, wait for the AXI and PCAP before
 * interrupting
 */
#define DMA_SRC_LAST_TRANSFER		1
/* Timeout for DMA completion */
#define DMA_TIMEOUT_MS			5000

/* Masks for controlling stuff in SLCR */
/* Disable all Level shifters */
@@ -124,6 +130,11 @@ struct zynq_fpga_priv {
	void __iomem *io_base;
	struct regmap *slcr;

	spinlock_t dma_lock;
	unsigned int dma_elm;
	unsigned int dma_nelms;
	struct scatterlist *cur_sg;

	struct completion dma_done;
};

@@ -143,37 +154,104 @@ static inline u32 zynq_fpga_read(const struct zynq_fpga_priv *priv,
	readl_poll_timeout(priv->io_base + addr, val, cond, sleep_us, \
			   timeout_us)

static void zynq_fpga_mask_irqs(struct zynq_fpga_priv *priv)
/* Cause the specified irq mask bits to generate IRQs */
static inline void zynq_fpga_set_irq(struct zynq_fpga_priv *priv, u32 enable)
{
	u32 intr_mask;

	intr_mask = zynq_fpga_read(priv, INT_MASK_OFFSET);
	zynq_fpga_write(priv, INT_MASK_OFFSET,
			intr_mask | IXR_DMA_DONE_MASK | IXR_ERROR_FLAGS_MASK);
	zynq_fpga_write(priv, INT_MASK_OFFSET, ~enable);
}

static void zynq_fpga_unmask_irqs(struct zynq_fpga_priv *priv)
/* Must be called with dma_lock held */
static void zynq_step_dma(struct zynq_fpga_priv *priv)
{
	u32 intr_mask;
	u32 addr;
	u32 len;
	bool first;

	intr_mask = zynq_fpga_read(priv, INT_MASK_OFFSET);
	zynq_fpga_write(priv, INT_MASK_OFFSET,
			intr_mask
			& ~(IXR_D_P_DONE_MASK | IXR_ERROR_FLAGS_MASK));
	first = priv->dma_elm == 0;
	while (priv->cur_sg) {
		/* Feed the DMA queue until it is full. */
		if (zynq_fpga_read(priv, STATUS_OFFSET) & STATUS_DMA_Q_F)
			break;

		addr = sg_dma_address(priv->cur_sg);
		len = sg_dma_len(priv->cur_sg);
		if (priv->dma_elm + 1 == priv->dma_nelms) {
			/* The last transfer waits for the PCAP to finish too,
			 * notice this also changes the irq_mask to ignore
			 * IXR_DMA_DONE_MASK which ensures we do not trigger
			 * the completion too early.
			 */
			addr |= DMA_SRC_LAST_TRANSFER;
			priv->cur_sg = NULL;
		} else {
			priv->cur_sg = sg_next(priv->cur_sg);
			priv->dma_elm++;
		}

		zynq_fpga_write(priv, DMA_SRC_ADDR_OFFSET, addr);
		zynq_fpga_write(priv, DMA_DST_ADDR_OFFSET, DMA_INVALID_ADDRESS);
		zynq_fpga_write(priv, DMA_SRC_LEN_OFFSET, len / 4);
		zynq_fpga_write(priv, DMA_DEST_LEN_OFFSET, 0);
	}

	/* Once the first transfer is queued we can turn on the ISR, future
	 * calls to zynq_step_dma will happen from the ISR context. The
	 * dma_lock spinlock guarantees this handover is done coherently, the
	 * ISR enable is put at the end to avoid another CPU spinning in the
	 * ISR on this lock.
	 */
	if (first && priv->cur_sg) {
		zynq_fpga_set_irq(priv,
				  IXR_DMA_DONE_MASK | IXR_ERROR_FLAGS_MASK);
	} else if (!priv->cur_sg) {
		/* The last transfer changes to DMA & PCAP mode since we do
		 * not want to continue until everything has been flushed into
		 * the PCAP.
		 */
		zynq_fpga_set_irq(priv,
				  IXR_D_P_DONE_MASK | IXR_ERROR_FLAGS_MASK);
	}
}

static irqreturn_t zynq_fpga_isr(int irq, void *data)
{
	struct zynq_fpga_priv *priv = data;
	u32 intr_status;

	/* disable DMA and error IRQs */
	zynq_fpga_mask_irqs(priv);
	/* If anything other than DMA completion is reported stop and hand
	 * control back to zynq_fpga_ops_write, something went wrong,
	 * otherwise progress the DMA.
	 */
	spin_lock(&priv->dma_lock);
	intr_status = zynq_fpga_read(priv, INT_STS_OFFSET);
	if (!(intr_status & IXR_ERROR_FLAGS_MASK) &&
	    (intr_status & IXR_DMA_DONE_MASK) && priv->cur_sg) {
		zynq_fpga_write(priv, INT_STS_OFFSET, IXR_DMA_DONE_MASK);
		zynq_step_dma(priv);
		spin_unlock(&priv->dma_lock);
		return IRQ_HANDLED;
	}
	spin_unlock(&priv->dma_lock);

	zynq_fpga_set_irq(priv, 0);
	complete(&priv->dma_done);

	return IRQ_HANDLED;
}

/* Sanity check the proposed bitstream. It must start with the sync word in
 * the correct byte order, and be dword aligned. The input is a Xilinx .bin
 * file with every 32 bit quantity swapped.
 */
static bool zynq_fpga_has_sync(const u8 *buf, size_t count)
{
	for (; count >= 4; buf += 4, count -= 4)
		if (buf[0] == 0x66 && buf[1] == 0x55 && buf[2] == 0x99 &&
		    buf[3] == 0xaa)
			return true;
	return false;
}

static int zynq_fpga_ops_write_init(struct fpga_manager *mgr,
				    struct fpga_image_info *info,
				    const char *buf, size_t count)
@@ -190,6 +268,13 @@ static int zynq_fpga_ops_write_init(struct fpga_manager *mgr,

	/* don't globally reset PL if we're doing partial reconfig */
	if (!(info->flags & FPGA_MGR_PARTIAL_RECONFIG)) {
		if (!zynq_fpga_has_sync(buf, count)) {
			dev_err(&mgr->dev,
				"Invalid bitstream, could not find a sync word. Bitstream must be a byte swapped .bin file\n");
			err = -EINVAL;
			goto out_err;
		}

		/* assert AXI interface resets */
		regmap_write(priv->slcr, SLCR_FPGA_RST_CTRL_OFFSET,
			     FPGA_RST_ALL_MASK);
@@ -259,10 +344,11 @@ static int zynq_fpga_ops_write_init(struct fpga_manager *mgr,
	zynq_fpga_write(priv, CTRL_OFFSET,
			(CTRL_PCAP_PR_MASK | CTRL_PCAP_MODE_MASK | ctrl));

	/* check that we have room in the command queue */
	/* We expect that the command queue is empty right now. */
	status = zynq_fpga_read(priv, STATUS_OFFSET);
	if (status & STATUS_DMA_Q_F) {
		dev_err(&mgr->dev, "DMA command queue full\n");
	if ((status & STATUS_DMA_Q_F) ||
	    (status & STATUS_DMA_Q_E) != STATUS_DMA_Q_E) {
		dev_err(&mgr->dev, "DMA command queue not right\n");
		err = -EBUSY;
		goto out_err;
	}
@@ -281,26 +367,36 @@ out_err:
	return err;
}

static int zynq_fpga_ops_write(struct fpga_manager *mgr,
			       const char *buf, size_t count)
static int zynq_fpga_ops_write(struct fpga_manager *mgr, struct sg_table *sgt)
{
	struct zynq_fpga_priv *priv;
	const char *why;
	int err;
	char *kbuf;
	size_t in_count;
	dma_addr_t dma_addr;
	u32 transfer_length;
	u32 intr_status;
	unsigned long timeout;
	unsigned long flags;
	struct scatterlist *sg;
	int i;

	in_count = count;
	priv = mgr->priv;

	kbuf =
	    dma_alloc_coherent(mgr->dev.parent, count, &dma_addr, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;
	/* The hardware can only DMA multiples of 4 bytes, and it requires the
	 * starting addresses to be aligned to 64 bits (UG585 pg 212).
	 */
	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		if ((sg->offset % 8) || (sg->length % 4)) {
			dev_err(&mgr->dev,
				"Invalid bitstream, chunks must be aligned\n");
			return -EINVAL;
		}
	}

	memcpy(kbuf, buf, count);
	priv->dma_nelms =
	    dma_map_sg(mgr->dev.parent, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
	if (priv->dma_nelms == 0) {
		dev_err(&mgr->dev, "Unable to DMA map (TO_DEVICE)\n");
		return -ENOMEM;
	}

	/* enable clock */
	err = clk_enable(priv->clk);
@@ -308,38 +404,67 @@ static int zynq_fpga_ops_write(struct fpga_manager *mgr,
		goto out_free;

	zynq_fpga_write(priv, INT_STS_OFFSET, IXR_ALL_MASK);

	reinit_completion(&priv->dma_done);

	/* enable DMA and error IRQs */
	zynq_fpga_unmask_irqs(priv);
	/* zynq_step_dma will turn on interrupts */
	spin_lock_irqsave(&priv->dma_lock, flags);
	priv->dma_elm = 0;
	priv->cur_sg = sgt->sgl;
	zynq_step_dma(priv);
	spin_unlock_irqrestore(&priv->dma_lock, flags);

	/* the +1 in the src addr is used to hold off on DMA_DONE IRQ
	 * until both AXI and PCAP are done ...
	 */
	zynq_fpga_write(priv, DMA_SRC_ADDR_OFFSET, (u32)(dma_addr) + 1);
	zynq_fpga_write(priv, DMA_DST_ADDR_OFFSET, (u32)DMA_INVALID_ADDRESS);
	timeout = wait_for_completion_timeout(&priv->dma_done,
					      msecs_to_jiffies(DMA_TIMEOUT_MS));

	/* convert #bytes to #words */
	transfer_length = (count + 3) / 4;

	zynq_fpga_write(priv, DMA_SRC_LEN_OFFSET, transfer_length);
	zynq_fpga_write(priv, DMA_DEST_LEN_OFFSET, 0);

	wait_for_completion(&priv->dma_done);
	spin_lock_irqsave(&priv->dma_lock, flags);
	zynq_fpga_set_irq(priv, 0);
	priv->cur_sg = NULL;
	spin_unlock_irqrestore(&priv->dma_lock, flags);

	intr_status = zynq_fpga_read(priv, INT_STS_OFFSET);
	zynq_fpga_write(priv, INT_STS_OFFSET, intr_status);
	zynq_fpga_write(priv, INT_STS_OFFSET, IXR_ALL_MASK);

	if (!((intr_status & IXR_D_P_DONE_MASK) == IXR_D_P_DONE_MASK)) {
		dev_err(&mgr->dev, "Error configuring FPGA\n");
		err = -EFAULT;
	/* There doesn't seem to be a way to force cancel any DMA, so if
	 * something went wrong we are relying on the hardware to have halted
	 * the DMA before we get here, if there was we could use
	 * wait_for_completion_interruptible too.
	 */

	if (intr_status & IXR_ERROR_FLAGS_MASK) {
		why = "DMA reported error";
		err = -EIO;
		goto out_report;
	}

	if (priv->cur_sg ||
	    !((intr_status & IXR_D_P_DONE_MASK) == IXR_D_P_DONE_MASK)) {
		if (timeout == 0)
			why = "DMA timed out";
		else
			why = "DMA did not complete";
		err = -EIO;
		goto out_report;
	}

	err = 0;
	goto out_clk;

out_report:
	dev_err(&mgr->dev,
		"%s: INT_STS:0x%x CTRL:0x%x LOCK:0x%x INT_MASK:0x%x STATUS:0x%x MCTRL:0x%x\n",
		why,
		intr_status,
		zynq_fpga_read(priv, CTRL_OFFSET),
		zynq_fpga_read(priv, LOCK_OFFSET),
		zynq_fpga_read(priv, INT_MASK_OFFSET),
		zynq_fpga_read(priv, STATUS_OFFSET),
		zynq_fpga_read(priv, MCTRL_OFFSET));

out_clk:
	clk_disable(priv->clk);

out_free:
	dma_free_coherent(mgr->dev.parent, count, kbuf, dma_addr);
	dma_unmap_sg(mgr->dev.parent, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
	return err;
}

@@ -400,9 +525,10 @@ static enum fpga_mgr_states zynq_fpga_ops_state(struct fpga_manager *mgr)
}

static const struct fpga_manager_ops zynq_fpga_ops = {
	.initial_header_size = 128,
	.state = zynq_fpga_ops_state,
	.write_init = zynq_fpga_ops_write_init,
	.write = zynq_fpga_ops_write,
	.write_sg = zynq_fpga_ops_write,
	.write_complete = zynq_fpga_ops_write_complete,
};

@@ -416,6 +542,7 @@ static int zynq_fpga_probe(struct platform_device *pdev)
	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	spin_lock_init(&priv->dma_lock);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->io_base = devm_ioremap_resource(dev, res);
@@ -452,7 +579,7 @@ static int zynq_fpga_probe(struct platform_device *pdev)
	/* unlock the device */
	zynq_fpga_write(priv, UNLOCK_OFFSET, UNLOCK_MASK);

	zynq_fpga_write(priv, INT_MASK_OFFSET, 0xFFFFFFFF);
	zynq_fpga_set_irq(priv, 0);
	zynq_fpga_write(priv, INT_STS_OFFSET, IXR_ALL_MASK);
	err = devm_request_irq(dev, priv->irq, zynq_fpga_isr, 0, dev_name(dev),
			       priv);
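With the zynq driver now providing write_sg, the consumer side of the new API is worth showing. A minimal sketch of programming an FPGA from an existing sg_table; 'my_program' is hypothetical and assumes 'mgr' came from of_fpga_mgr_get() and was checked with IS_ERR():

	#include <linux/fpga/fpga-mgr.h>
	#include <linux/scatterlist.h>

	/* Hypothetical caller: 'sgt' already describes the bitstream image. */
	static int my_program(struct fpga_manager *mgr, struct sg_table *sgt)
	{
		struct fpga_image_info info = {
			.flags = 0,	/* 0 = full reconfiguration */
		};

		return fpga_mgr_buf_load_sg(mgr, &info, sgt);
	}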
12	drivers/fsi/Kconfig	(new file)
@@ -0,0 +1,12 @@
#
# FSI subsystem
#

menu "FSI support"

config FSI
	tristate "FSI support"
	---help---
	  FSI - the FRU Support Interface - is a simple bus for low-level
	  access to POWER-based hardware.
endmenu

2	drivers/fsi/Makefile	(new file)
@@ -0,0 +1,2 @@

obj-$(CONFIG_FSI) += fsi-core.o

59	drivers/fsi/fsi-core.c	(new file)
@@ -0,0 +1,59 @@
/*
 * FSI core driver
 *
 * Copyright (C) IBM Corporation 2016
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/device.h>
#include <linux/fsi.h>
#include <linux/module.h>

/* FSI core & Linux bus type definitions */

static int fsi_bus_match(struct device *dev, struct device_driver *drv)
{
	struct fsi_device *fsi_dev = to_fsi_dev(dev);
	struct fsi_driver *fsi_drv = to_fsi_drv(drv);
	const struct fsi_device_id *id;

	if (!fsi_drv->id_table)
		return 0;

	for (id = fsi_drv->id_table; id->engine_type; id++) {
		if (id->engine_type != fsi_dev->engine_type)
			continue;
		if (id->version == FSI_VERSION_ANY ||
		    id->version == fsi_dev->version)
			return 1;
	}

	return 0;
}

struct bus_type fsi_bus_type = {
	.name		= "fsi",
	.match		= fsi_bus_match,
};
EXPORT_SYMBOL_GPL(fsi_bus_type);

static int fsi_init(void)
{
	return bus_register(&fsi_bus_type);
}

static void fsi_exit(void)
{
	bus_unregister(&fsi_bus_type);
}

module_init(fsi_init);
module_exit(fsi_exit);
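The match logic above implies what an FSI engine driver will eventually look like. A rough sketch under stated assumptions: the engine type value is made up, and the exact layout of struct fsi_driver in <linux/fsi.h> may differ from what is written here.

	#include <linux/fsi.h>
	#include <linux/module.h>

	/* Hypothetical engine driver matching engine type 0x2 at any version. */
	static const struct fsi_device_id my_ids[] = {
		{ .engine_type = 0x2, .version = FSI_VERSION_ANY },
		{ 0 },
	};

	static struct fsi_driver my_fsi_driver = {
		.id_table = my_ids,
		.drv = {
			.name = "my-fsi-engine",
			.bus = &fsi_bus_type,
		},
	};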
@@ -47,12 +47,8 @@ void vmbus_setevent(struct vmbus_channel *channel)
	 * For channels marked as in "low latency" mode
	 * bypass the monitor page mechanism.
	 */
	if ((channel->offermsg.monitor_allocated) &&
	    (!channel->low_latency)) {
		/* Each u32 represents 32 channels */
		sync_set_bit(channel->offermsg.child_relid & 31,
			     (unsigned long *) vmbus_connection.send_int_page +
			     (channel->offermsg.child_relid >> 5));
	if (channel->offermsg.monitor_allocated && !channel->low_latency) {
		vmbus_send_interrupt(channel->offermsg.child_relid);

		/* Get the child to parent monitor page */
		monitorpage = vmbus_connection.monitor_pages[1];
@@ -157,6 +153,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
	}

	init_completion(&open_info->waitevent);
	open_info->waiting_channel = newchannel;

	open_msg = (struct vmbus_channel_open_channel *)open_info->msg;
	open_msg->header.msgtype = CHANNELMSG_OPENCHANNEL;
@@ -181,7 +178,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

	ret = vmbus_post_msg(open_msg,
			     sizeof(struct vmbus_channel_open_channel));
			     sizeof(struct vmbus_channel_open_channel), true);

	if (ret != 0) {
		err = ret;
@@ -194,6 +191,11 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
	list_del(&open_info->msglistentry);
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

	if (newchannel->rescind) {
		err = -ENODEV;
		goto error_free_gpadl;
	}

	if (open_info->response.open_result.status) {
		err = -EAGAIN;
		goto error_free_gpadl;
@@ -233,7 +235,7 @@ int vmbus_send_tl_connect_request(const uuid_le *shv_guest_servie_id,
	conn_msg.guest_endpoint_id = *shv_guest_servie_id;
	conn_msg.host_service_id = *shv_host_servie_id;

	return vmbus_post_msg(&conn_msg, sizeof(conn_msg));
	return vmbus_post_msg(&conn_msg, sizeof(conn_msg), true);
}
EXPORT_SYMBOL_GPL(vmbus_send_tl_connect_request);

@@ -405,6 +407,7 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
		return ret;

	init_completion(&msginfo->waitevent);
	msginfo->waiting_channel = channel;

	gpadlmsg = (struct vmbus_channel_gpadl_header *)msginfo->msg;
	gpadlmsg->header.msgtype = CHANNELMSG_GPADL_HEADER;
@@ -419,7 +422,7 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

	ret = vmbus_post_msg(gpadlmsg, msginfo->msgsize -
			     sizeof(*msginfo));
			     sizeof(*msginfo), true);
	if (ret != 0)
		goto cleanup;

@@ -433,14 +436,19 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
		gpadl_body->gpadl = next_gpadl_handle;

		ret = vmbus_post_msg(gpadl_body,
				     submsginfo->msgsize -
				     sizeof(*submsginfo));
				     submsginfo->msgsize - sizeof(*submsginfo),
				     true);
		if (ret != 0)
			goto cleanup;

	}
	wait_for_completion(&msginfo->waitevent);

	if (channel->rescind) {
		ret = -ENODEV;
		goto cleanup;
	}

	/* At this point, we received the gpadl created msg */
	*gpadl_handle = gpadlmsg->gpadl;

@@ -474,6 +482,7 @@ int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle)
		return -ENOMEM;

	init_completion(&info->waitevent);
	info->waiting_channel = channel;

	msg = (struct vmbus_channel_gpadl_teardown *)info->msg;

@@ -485,14 +494,19 @@ int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle)
	list_add_tail(&info->msglistentry,
		      &vmbus_connection.chn_msg_list);
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
	ret = vmbus_post_msg(msg,
			     sizeof(struct vmbus_channel_gpadl_teardown));
	ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_gpadl_teardown),
			     true);

	if (ret)
		goto post_msg_err;

	wait_for_completion(&info->waitevent);

	if (channel->rescind) {
		ret = -ENODEV;
		goto post_msg_err;
	}

post_msg_err:
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
	list_del(&info->msglistentry);
@@ -516,7 +530,7 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
	int ret;

	/*
	 * process_chn_event(), running in the tasklet, can race
	 * vmbus_on_event(), running in the tasklet, can race
	 * with vmbus_close_internal() in the case of SMP guest, e.g., when
	 * the former is accessing channel->inbound.ring_buffer, the latter
	 * could be freeing the ring_buffer pages.
@@ -557,7 +571,8 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
	msg->header.msgtype = CHANNELMSG_CLOSECHANNEL;
	msg->child_relid = channel->offermsg.child_relid;

	ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_close_channel));
	ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_close_channel),
			     true);

	if (ret) {
		pr_err("Close failed: close post msg return is %d\n", ret);
@@ -628,15 +643,14 @@ void vmbus_close(struct vmbus_channel *channel)
EXPORT_SYMBOL_GPL(vmbus_close);

int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer,
			 u32 bufferlen, u64 requestid,
			 enum vmbus_packet_type type, u32 flags, bool kick_q)
			 u32 bufferlen, u64 requestid,
			 enum vmbus_packet_type type, u32 flags)
{
	struct vmpacket_descriptor desc;
	u32 packetlen = sizeof(struct vmpacket_descriptor) + bufferlen;
	u32 packetlen_aligned = ALIGN(packetlen, sizeof(u64));
	struct kvec bufferlist[3];
	u64 aligned_data = 0;
	bool lock = channel->acquire_ring_lock;
	int num_vecs = ((bufferlen != 0) ? 3 : 1);

@@ -655,9 +669,7 @@ int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer,
	bufferlist[2].iov_base = &aligned_data;
	bufferlist[2].iov_len = (packetlen_aligned - packetlen);

	return hv_ringbuffer_write(channel, bufferlist, num_vecs,
				   lock, kick_q);
	return hv_ringbuffer_write(channel, bufferlist, num_vecs);
}
EXPORT_SYMBOL(vmbus_sendpacket_ctl);

@@ -680,7 +692,7 @@ int vmbus_sendpacket(struct vmbus_channel *channel, void *buffer,
		     enum vmbus_packet_type type, u32 flags)
{
	return vmbus_sendpacket_ctl(channel, buffer, bufferlen, requestid,
				    type, flags, true);
				    type, flags);
}
EXPORT_SYMBOL(vmbus_sendpacket);

@@ -692,11 +704,9 @@ EXPORT_SYMBOL(vmbus_sendpacket);
 * explicitly.
 */
int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
				    struct hv_page_buffer pagebuffers[],
				    u32 pagecount, void *buffer, u32 bufferlen,
				    u64 requestid,
				    u32 flags,
				    bool kick_q)
				    struct hv_page_buffer pagebuffers[],
				    u32 pagecount, void *buffer, u32 bufferlen,
				    u64 requestid, u32 flags)
{
	int i;
	struct vmbus_channel_packet_page_buffer desc;
@@ -705,12 +715,10 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
	u32 packetlen_aligned;
	struct kvec bufferlist[3];
	u64 aligned_data = 0;
	bool lock = channel->acquire_ring_lock;

	if (pagecount > MAX_PAGE_BUFFER_COUNT)
		return -EINVAL;

	/*
	 * Adjust the size down since vmbus_channel_packet_page_buffer is the
	 * largest size we support
@@ -742,8 +750,7 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
	bufferlist[2].iov_base = &aligned_data;
	bufferlist[2].iov_len = (packetlen_aligned - packetlen);

	return hv_ringbuffer_write(channel, bufferlist, 3,
				   lock, kick_q);
	return hv_ringbuffer_write(channel, bufferlist, 3);
}
EXPORT_SYMBOL_GPL(vmbus_sendpacket_pagebuffer_ctl);

@@ -757,9 +764,10 @@ int vmbus_sendpacket_pagebuffer(struct vmbus_channel *channel,
			       u64 requestid)
{
	u32 flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED;

	return vmbus_sendpacket_pagebuffer_ctl(channel, pagebuffers, pagecount,
					       buffer, bufferlen, requestid,
					       flags, true);
					       buffer, bufferlen,
					       requestid, flags);

}
EXPORT_SYMBOL_GPL(vmbus_sendpacket_pagebuffer);
@@ -778,7 +786,6 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
	u32 packetlen_aligned;
	struct kvec bufferlist[3];
	u64 aligned_data = 0;
	bool lock = channel->acquire_ring_lock;

	packetlen = desc_size + bufferlen;
	packetlen_aligned = ALIGN(packetlen, sizeof(u64));
@@ -798,8 +805,7 @@ int vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel,
	bufferlist[2].iov_base = &aligned_data;
	bufferlist[2].iov_len = (packetlen_aligned - packetlen);

	return hv_ringbuffer_write(channel, bufferlist, 3,
				   lock, true);
	return hv_ringbuffer_write(channel, bufferlist, 3);
}
EXPORT_SYMBOL_GPL(vmbus_sendpacket_mpb_desc);

@@ -817,7 +823,6 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
	u32 packetlen_aligned;
	struct kvec bufferlist[3];
	u64 aligned_data = 0;
	bool lock = channel->acquire_ring_lock;
	u32 pfncount = NUM_PAGES_SPANNED(multi_pagebuffer->offset,
					 multi_pagebuffer->len);

@@ -856,8 +861,7 @@ int vmbus_sendpacket_multipagebuffer(struct vmbus_channel *channel,
	bufferlist[2].iov_base = &aligned_data;
	bufferlist[2].iov_len = (packetlen_aligned - packetlen);

	return hv_ringbuffer_write(channel, bufferlist, 3,
				   lock, true);
	return hv_ringbuffer_write(channel, bufferlist, 3);
}
EXPORT_SYMBOL_GPL(vmbus_sendpacket_multipagebuffer);
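The caller-facing vmbus_sendpacket() signature is unchanged by the kick_q/lock removal above. A minimal usage sketch; 'my_send' and its arguments are hypothetical and come from the calling driver:

	#include <linux/hyperv.h>

	/* Send a buffer as an inband VMBus packet and request a completion. */
	static int my_send(struct vmbus_channel *chan, void *buf, u32 len,
			   u64 req_id)
	{
		return vmbus_sendpacket(chan, buf, len, req_id,
					VM_PKT_DATA_INBAND,
					VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	}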
@@ -31,6 +31,7 @@
 #include <linux/completion.h>
 #include <linux/delay.h>
 #include <linux/hyperv.h>
#include <asm/mshyperv.h>

 #include "hyperv_vmbus.h"

@@ -147,6 +148,29 @@ static const struct {
	{ HV_RDV_GUID },
};

/*
 * The rescinded channel may be blocked waiting for a response from the host;
 * take care of that.
 */
static void vmbus_rescind_cleanup(struct vmbus_channel *channel)
{
	struct vmbus_channel_msginfo *msginfo;
	unsigned long flags;

	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);

	list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
			    msglistentry) {

		if (msginfo->waiting_channel == channel) {
			complete(&msginfo->waitevent);
			break;
		}
	}
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}

static bool is_unsupported_vmbus_devs(const uuid_le *guid)
{
	int i;
@@ -180,33 +204,34 @@ static u16 hv_get_dev_type(const struct vmbus_channel *channel)
 * @buf: Raw buffer channel data
 *
 * @icmsghdrp is of type &struct icmsg_hdr.
 * @negop is of type &struct icmsg_negotiate.
 * Set up and fill in default negotiate response message.
 *
 * The fw_version specifies the framework version that
 * we can support and srv_version specifies the service
 * version we can support.
 * The fw_version and fw_vercnt specify the framework versions that
 * we can support.
 *
 * The srv_version and srv_vercnt specify the service
 * versions we can support.
 *
 * Versions are given in decreasing order.
 *
 * nego_fw_version and nego_srv_version store the selected protocol versions.
 *
 * Mainly used by Hyper-V drivers.
 */
bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp,
			       struct icmsg_negotiate *negop, u8 *buf,
			       int fw_version, int srv_version)
			       u8 *buf, const int *fw_version, int fw_vercnt,
			       const int *srv_version, int srv_vercnt,
			       int *nego_fw_version, int *nego_srv_version)
{
	int icframe_major, icframe_minor;
	int icmsg_major, icmsg_minor;
	int fw_major, fw_minor;
	int srv_major, srv_minor;
	int i;
	int i, j;
	bool found_match = false;
	struct icmsg_negotiate *negop;

	icmsghdrp->icmsgsize = 0x10;
	fw_major = (fw_version >> 16);
	fw_minor = (fw_version & 0xFFFF);

	srv_major = (srv_version >> 16);
	srv_minor = (srv_version & 0xFFFF);

	negop = (struct icmsg_negotiate *)&buf[
		sizeof(struct vmbuspipe_hdr) +
		sizeof(struct icmsg_hdr)];
@@ -222,13 +247,22 @@ bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp,
	 * support.
	 */

	for (i = 0; i < negop->icframe_vercnt; i++) {
		if ((negop->icversion_data[i].major == fw_major) &&
		    (negop->icversion_data[i].minor == fw_minor)) {
			icframe_major = negop->icversion_data[i].major;
			icframe_minor = negop->icversion_data[i].minor;
			found_match = true;
	for (i = 0; i < fw_vercnt; i++) {
		fw_major = (fw_version[i] >> 16);
		fw_minor = (fw_version[i] & 0xFFFF);

		for (j = 0; j < negop->icframe_vercnt; j++) {
			if ((negop->icversion_data[j].major == fw_major) &&
			    (negop->icversion_data[j].minor == fw_minor)) {
				icframe_major = negop->icversion_data[j].major;
				icframe_minor = negop->icversion_data[j].minor;
				found_match = true;
				break;
			}
		}

		if (found_match)
			break;
	}

	if (!found_match)
@@ -236,14 +270,26 @@ bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp,

	found_match = false;

	for (i = negop->icframe_vercnt;
	     (i < negop->icframe_vercnt + negop->icmsg_vercnt); i++) {
		if ((negop->icversion_data[i].major == srv_major) &&
		    (negop->icversion_data[i].minor == srv_minor)) {
			icmsg_major = negop->icversion_data[i].major;
			icmsg_minor = negop->icversion_data[i].minor;
			found_match = true;
	for (i = 0; i < srv_vercnt; i++) {
		srv_major = (srv_version[i] >> 16);
		srv_minor = (srv_version[i] & 0xFFFF);

		for (j = negop->icframe_vercnt;
		     (j < negop->icframe_vercnt + negop->icmsg_vercnt);
		     j++) {

			if ((negop->icversion_data[j].major == srv_major) &&
			    (negop->icversion_data[j].minor == srv_minor)) {

				icmsg_major = negop->icversion_data[j].major;
				icmsg_minor = negop->icversion_data[j].minor;
				found_match = true;
				break;
			}
		}

		if (found_match)
			break;
	}

	/*
@@ -260,6 +306,12 @@ fw_error:
		negop->icmsg_vercnt = 1;
	}

	if (nego_fw_version)
		*nego_fw_version = (icframe_major << 16) | icframe_minor;

	if (nego_srv_version)
		*nego_srv_version = (icmsg_major << 16) | icmsg_minor;

	negop->icversion_data[0].major = icframe_major;
	negop->icversion_data[0].minor = icframe_minor;
	negop->icversion_data[1].major = icmsg_major;
@@ -280,13 +332,15 @@ static struct vmbus_channel *alloc_channel(void)
	if (!channel)
		return NULL;

	channel->acquire_ring_lock = true;
	spin_lock_init(&channel->inbound_lock);
	spin_lock_init(&channel->lock);

	INIT_LIST_HEAD(&channel->sc_list);
	INIT_LIST_HEAD(&channel->percpu_list);

	tasklet_init(&channel->callback_event,
		     vmbus_on_event, (unsigned long)channel);

	return channel;
}

@@ -295,15 +349,17 @@ static struct vmbus_channel *alloc_channel(void)
 */
static void free_channel(struct vmbus_channel *channel)
{
	tasklet_kill(&channel->callback_event);
	kfree(channel);
}

static void percpu_channel_enq(void *arg)
{
	struct vmbus_channel *channel = arg;
	int cpu = smp_processor_id();
	struct hv_per_cpu_context *hv_cpu
		= this_cpu_ptr(hv_context.cpu_context);

	list_add_tail(&channel->percpu_list, &hv_context.percpu_list[cpu]);
	list_add_tail(&channel->percpu_list, &hv_cpu->chan_list);
}

static void percpu_channel_deq(void *arg)
@@ -321,24 +377,21 @@ static void vmbus_release_relid(u32 relid)
	memset(&msg, 0, sizeof(struct vmbus_channel_relid_released));
	msg.child_relid = relid;
	msg.header.msgtype = CHANNELMSG_RELID_RELEASED;
	vmbus_post_msg(&msg, sizeof(struct vmbus_channel_relid_released));
	vmbus_post_msg(&msg, sizeof(struct vmbus_channel_relid_released),
		       true);
}

void hv_event_tasklet_disable(struct vmbus_channel *channel)
{
	struct tasklet_struct *tasklet;
	tasklet = hv_context.event_dpc[channel->target_cpu];
	tasklet_disable(tasklet);
	tasklet_disable(&channel->callback_event);
}

void hv_event_tasklet_enable(struct vmbus_channel *channel)
{
	struct tasklet_struct *tasklet;
	tasklet = hv_context.event_dpc[channel->target_cpu];
	tasklet_enable(tasklet);
	tasklet_enable(&channel->callback_event);

	/* In case there is any pending event */
	tasklet_schedule(tasklet);
	tasklet_schedule(&channel->callback_event);
}

void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid)
@@ -673,9 +726,12 @@ static void vmbus_wait_for_unload(void)
			break;

		for_each_online_cpu(cpu) {
			page_addr = hv_context.synic_message_page[cpu];
			msg = (struct hv_message *)page_addr +
				VMBUS_MESSAGE_SINT;
			struct hv_per_cpu_context *hv_cpu
				= per_cpu_ptr(hv_context.cpu_context, cpu);

			page_addr = hv_cpu->synic_message_page;
			msg = (struct hv_message *)page_addr
				+ VMBUS_MESSAGE_SINT;

			message_type = READ_ONCE(msg->header.message_type);
			if (message_type == HVMSG_NONE)
@@ -699,7 +755,10 @@ static void vmbus_wait_for_unload(void)
	 * messages after we reconnect.
	 */
	for_each_online_cpu(cpu) {
		page_addr = hv_context.synic_message_page[cpu];
		struct hv_per_cpu_context *hv_cpu
			= per_cpu_ptr(hv_context.cpu_context, cpu);

		page_addr = hv_cpu->synic_message_page;
		msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;
		msg->header.message_type = HVMSG_NONE;
	}
@@ -728,7 +787,8 @@ void vmbus_initiate_unload(bool crash)
	init_completion(&vmbus_connection.unload_event);
	memset(&hdr, 0, sizeof(struct vmbus_channel_message_header));
	hdr.msgtype = CHANNELMSG_UNLOAD;
	vmbus_post_msg(&hdr, sizeof(struct vmbus_channel_message_header));
	vmbus_post_msg(&hdr, sizeof(struct vmbus_channel_message_header),
		       !crash);

	/*
	 * vmbus_initiate_unload() is also called on crash and the crash can be
@@ -758,13 +818,6 @@ static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
		return;
	}

	/*
	 * By default we setup state to enable batched
	 * reading. A specific service can choose to
	 * disable this prior to opening the channel.
	 */
	newchannel->batched_reading = true;

	/*
	 * Setup state for signalling the host.
	 */
@@ -823,6 +876,8 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
	channel->rescind = true;
	spin_unlock_irqrestore(&channel->lock, flags);

	vmbus_rescind_cleanup(channel);

	if (channel->device_obj) {
		if (channel->chn_rescind_callback) {
			channel->chn_rescind_callback(channel);
@@ -1116,8 +1171,8 @@ int vmbus_request_offers(void)
	msg->msgtype = CHANNELMSG_REQUESTOFFERS;

	ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_message_header),
			     true);
	if (ret != 0) {
		pr_err("Unable to request offers - %d\n", ret);
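As a usage sketch for the reworked negotiate helper: an IC service now passes arrays of supported versions, newest first, and can read back what the host selected through the new nego_* out-parameters. All version numbers below are made up for illustration:

	/* Hypothetical IC service offering one framework version and two
	 * service versions; ARRAY_SIZE comes from <linux/kernel.h>. */
	static const int fw_versions[] = { (3 << 16) | 0 };		/* 3.0 */
	static const int srv_versions[] = { (4 << 16) | 0,		/* 4.0 */
					    (3 << 16) | 0 };		/* 3.0 */
	static int srv_version;

	static bool my_negotiate(struct icmsg_hdr *icmsghdrp, u8 *buf)
	{
		return vmbus_prep_negotiate_resp(icmsghdrp, buf,
						 fw_versions,
						 ARRAY_SIZE(fw_versions),
						 srv_versions,
						 ARRAY_SIZE(srv_versions),
						 NULL, &srv_version);
	}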
@@ -93,12 +93,10 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo,
	 * all the CPUs. This is needed for kexec to work correctly where
	 * the CPU attempting to connect may not be CPU 0.
	 */
	if (version >= VERSION_WIN8_1) {
		msg->target_vcpu = hv_context.vp_index[get_cpu()];
		put_cpu();
	} else {
	if (version >= VERSION_WIN8_1)
		msg->target_vcpu = hv_context.vp_index[smp_processor_id()];
	else
		msg->target_vcpu = 0;
	}

	/*
	 * Add to list before we send the request since we may
@@ -111,7 +109,8 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo,
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

	ret = vmbus_post_msg(msg,
			     sizeof(struct vmbus_channel_initiate_contact));
			     sizeof(struct vmbus_channel_initiate_contact),
			     true);
	if (ret != 0) {
		spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
		list_del(&msginfo->msglistentry);
@@ -220,11 +219,8 @@ int vmbus_connect(void)
		goto cleanup;

	vmbus_proto_version = version;
	pr_info("Hyper-V Host Build:%d-%d.%d-%d-%d.%d; Vmbus version:%d.%d\n",
		host_info_eax, host_info_ebx >> 16,
		host_info_ebx & 0xFFFF, host_info_ecx,
		host_info_edx >> 24, host_info_edx & 0xFFFFFF,
		version >> 16, version & 0xFFFF);
	pr_info("Vmbus version:%d.%d\n",
		version >> 16, version & 0xFFFF);

	kfree(msginfo);
	return 0;
@@ -263,29 +259,6 @@ void vmbus_disconnect(void)
	vmbus_connection.monitor_pages[1] = NULL;
}

/*
 * Map the given relid to the corresponding channel based on the
 * per-cpu list of channels that have been affinitized to this CPU.
 * This will be used in the channel callback path as we can do this
 * mapping in a lock-free fashion.
 */
static struct vmbus_channel *pcpu_relid2channel(u32 relid)
{
	struct vmbus_channel *channel;
	struct vmbus_channel *found_channel = NULL;
	int cpu = smp_processor_id();
	struct list_head *pcpu_head = &hv_context.percpu_list[cpu];

	list_for_each_entry(channel, pcpu_head, percpu_list) {
		if (channel->offermsg.child_relid == relid) {
			found_channel = channel;
			break;
		}
	}

	return found_channel;
}

/*
 * relid2channel - Get the channel object given its
 *		   child relative id (ie channel id)
@@ -322,23 +295,12 @@ struct vmbus_channel *relid2channel(u32 relid)
}

/*
 * process_chn_event - Process a channel event notification
 * vmbus_on_event - Process a channel event notification
 */
static void process_chn_event(u32 relid)
void vmbus_on_event(unsigned long data)
{
	struct vmbus_channel *channel;
	void *arg;
	bool read_state;
	u32 bytes_to_read;

	/*
	 * Find the channel based on this relid and invokes the
	 * channel callback to process the event
	 */
	channel = pcpu_relid2channel(relid);

	if (!channel)
		return;
	struct vmbus_channel *channel = (void *) data;
	void (*callback_fn)(void *);

	/*
	 * A channel once created is persistent even when there
@@ -348,10 +310,13 @@ static void process_chn_event(u32 relid)
	 * Thus, checking and invoking the driver specific callback takes
	 * care of orderly unloading of the driver.
	 */
	callback_fn = READ_ONCE(channel->onchannel_callback);
	if (unlikely(callback_fn == NULL))
		return;

	if (channel->onchannel_callback != NULL) {
		arg = channel->channel_callback_context;
		read_state = channel->batched_reading;
	(*callback_fn)(channel->channel_callback_context);

	if (channel->callback_mode == HV_CALL_BATCHED) {
		/*
		 * This callback reads the messages sent by the host.
		 * We can optimize host to guest signaling by ensuring:
@@ -363,71 +328,10 @@ static void process_chn_event(u32 relid)
		 * state is set we check to see if additional packets are
		 * available to read. In this case we repeat the process.
		 */
		if (hv_end_read(&channel->inbound) != 0) {
			hv_begin_read(&channel->inbound);

		do {
			if (read_state)
				hv_begin_read(&channel->inbound);
			channel->onchannel_callback(arg);
			if (read_state)
				bytes_to_read = hv_end_read(&channel->inbound);
			else
				bytes_to_read = 0;
		} while (read_state && (bytes_to_read != 0));
	}
}

/*
 * vmbus_on_event - Handler for events
 */
void vmbus_on_event(unsigned long data)
{
	u32 dword;
	u32 maxdword;
	int bit;
	u32 relid;
	u32 *recv_int_page = NULL;
	void *page_addr;
	int cpu = smp_processor_id();
	union hv_synic_event_flags *event;

	if (vmbus_proto_version < VERSION_WIN8) {
		maxdword = MAX_NUM_CHANNELS_SUPPORTED >> 5;
		recv_int_page = vmbus_connection.recv_int_page;
	} else {
		/*
		 * When the host is win8 and beyond, the event page
		 * can be directly checked to get the id of the channel
		 * that has the interrupt pending.
		 */
		maxdword = HV_EVENT_FLAGS_DWORD_COUNT;
		page_addr = hv_context.synic_event_page[cpu];
		event = (union hv_synic_event_flags *)page_addr +
			VMBUS_MESSAGE_SINT;
		recv_int_page = event->flags32;
	}

	/* Check events */
	if (!recv_int_page)
		return;
	for (dword = 0; dword < maxdword; dword++) {
		if (!recv_int_page[dword])
			continue;
		for (bit = 0; bit < 32; bit++) {
			if (sync_test_and_clear_bit(bit,
				(unsigned long *)&recv_int_page[dword])) {
				relid = (dword << 5) + bit;

				if (relid == 0)
					/*
					 * Special case - vmbus
					 * channel protocol msg
					 */
					continue;

				process_chn_event(relid);
			}
			tasklet_schedule(&channel->callback_event);
		}
	}
}
@@ -435,7 +339,7 @@ void vmbus_on_event(unsigned long data)
/*
 * vmbus_post_msg - Send a msg on the vmbus's message connection
 */
int vmbus_post_msg(void *buffer, size_t buflen)
int vmbus_post_msg(void *buffer, size_t buflen, bool can_sleep)
{
	union hv_connection_id conn_id;
	int ret = 0;
@@ -450,7 +354,7 @@ int vmbus_post_msg(void *buffer, size_t buflen)
	 * insufficient resources. Retry the operation a couple of
	 * times before giving up.
	 */
	while (retries < 20) {
	while (retries < 100) {
		ret = hv_post_message(conn_id, 1, buffer, buflen);

		switch (ret) {
@@ -473,8 +377,14 @@ int vmbus_post_msg(void *buffer, size_t buflen)
		}

		retries++;
		udelay(usec);
		if (usec < 2048)
		if (can_sleep && usec > 1000)
			msleep(usec / 1000);
		else if (usec < MAX_UDELAY_MS * 1000)
			udelay(usec);
		else
			mdelay(usec / 1000);

		if (usec < 256000)
			usec *= 2;
	}
	return ret;
@@ -487,12 +397,8 @@ void vmbus_set_event(struct vmbus_channel *channel)
{
	u32 child_relid = channel->offermsg.child_relid;

	if (!channel->is_dedicated_interrupt) {
		/* Each u32 represents 32 channels */
		sync_set_bit(child_relid & 31,
			(unsigned long *)vmbus_connection.send_int_page +
			(child_relid >> 5));
	}
	if (!channel->is_dedicated_interrupt)
		vmbus_send_interrupt(child_relid);

	hv_do_hypercall(HVCALL_SIGNAL_EVENT, channel->sig_event, NULL);
}
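A note on the retry arithmetic in the reworked vmbus_post_msg(): assuming the delay starts at 1 microsecond (it is initialised earlier in the function, outside this hunk), it doubles on each retry until the "usec < 256000" test stops the growth at 262144 us. The first ~18 retries therefore cost about half a second combined, and each of the remaining retries up to the new limit of 100 costs roughly 0.26 s, bounding the worst case at roughly 22 seconds. The new can_sleep flag decides whether those waits are msleep()s or busy udelay()/mdelay() spins, which is why callers in atomic context (e.g. the crash path in vmbus_initiate_unload()) pass false.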
475
drivers/hv/hv.c
475
drivers/hv/hv.c
@@ -36,154 +36,12 @@
/* The one and only */
struct hv_context hv_context = {
	.synic_initialized = false,
	.hypercall_page = NULL,
};

#define HV_TIMER_FREQUENCY (10 * 1000 * 1000) /* 100ns period */
#define HV_MAX_MAX_DELTA_TICKS 0xffffffff
#define HV_MIN_DELTA_TICKS 1

/*
 * query_hypervisor_info - Get version info of the windows hypervisor
 */
unsigned int host_info_eax;
unsigned int host_info_ebx;
unsigned int host_info_ecx;
unsigned int host_info_edx;

static int query_hypervisor_info(void)
{
	unsigned int eax;
	unsigned int ebx;
	unsigned int ecx;
	unsigned int edx;
	unsigned int max_leaf;
	unsigned int op;

	/*
	 * Its assumed that this is called after confirming that Viridian
	 * is present. Query id and revision.
	 */
	eax = 0;
	ebx = 0;
	ecx = 0;
	edx = 0;
	op = HVCPUID_VENDOR_MAXFUNCTION;
	cpuid(op, &eax, &ebx, &ecx, &edx);

	max_leaf = eax;

	if (max_leaf >= HVCPUID_VERSION) {
		eax = 0;
		ebx = 0;
		ecx = 0;
		edx = 0;
		op = HVCPUID_VERSION;
		cpuid(op, &eax, &ebx, &ecx, &edx);
		host_info_eax = eax;
		host_info_ebx = ebx;
		host_info_ecx = ecx;
		host_info_edx = edx;
	}
	return max_leaf;
}

/*
 * hv_do_hypercall- Invoke the specified hypercall
 */
u64 hv_do_hypercall(u64 control, void *input, void *output)
{
	u64 input_address = (input) ? virt_to_phys(input) : 0;
	u64 output_address = (output) ? virt_to_phys(output) : 0;
	void *hypercall_page = hv_context.hypercall_page;
#ifdef CONFIG_X86_64
	u64 hv_status = 0;

	if (!hypercall_page)
		return (u64)ULLONG_MAX;

	__asm__ __volatile__("mov %0, %%r8" : : "r" (output_address) : "r8");
	__asm__ __volatile__("call *%3" : "=a" (hv_status) :
			     "c" (control), "d" (input_address),
			     "m" (hypercall_page));

	return hv_status;

#else

	u32 control_hi = control >> 32;
	u32 control_lo = control & 0xFFFFFFFF;
	u32 hv_status_hi = 1;
	u32 hv_status_lo = 1;
	u32 input_address_hi = input_address >> 32;
	u32 input_address_lo = input_address & 0xFFFFFFFF;
	u32 output_address_hi = output_address >> 32;
	u32 output_address_lo = output_address & 0xFFFFFFFF;

	if (!hypercall_page)
		return (u64)ULLONG_MAX;

	__asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
			      "=a"(hv_status_lo) : "d" (control_hi),
			      "a" (control_lo), "b" (input_address_hi),
			      "c" (input_address_lo), "D"(output_address_hi),
			      "S"(output_address_lo), "m" (hypercall_page));

	return hv_status_lo | ((u64)hv_status_hi << 32);
#endif /* !x86_64 */
}
EXPORT_SYMBOL_GPL(hv_do_hypercall);
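/*
 * Reading of the inline asm above (a sketch, not authoritative): on
 * x86_64 the hypercall control word travels in RCX, the input GPA in
 * RDX and the output GPA in R8, with the status returned in RAX. The
 * 32-bit variant splits each 64-bit value across register pairs:
 * EDX:EAX for control and status, EBX:ECX for the input address and
 * EDI:ESI for the output address.
 */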
#ifdef CONFIG_X86_64
static u64 read_hv_clock_tsc(struct clocksource *arg)
{
	u64 current_tick;
	struct ms_hyperv_tsc_page *tsc_pg = hv_context.tsc_page;

	if (tsc_pg->tsc_sequence != 0) {
		/*
		 * Use the tsc page to compute the value.
		 */

		while (1) {
			u64 tmp;
			u32 sequence = tsc_pg->tsc_sequence;
			u64 cur_tsc;
			u64 scale = tsc_pg->tsc_scale;
			s64 offset = tsc_pg->tsc_offset;

			rdtscll(cur_tsc);
			/* current_tick = ((cur_tsc *scale) >> 64) + offset */
			asm("mulq %3"
				: "=d" (current_tick), "=a" (tmp)
				: "a" (cur_tsc), "r" (scale));

			current_tick += offset;
			if (tsc_pg->tsc_sequence == sequence)
				return current_tick;

			if (tsc_pg->tsc_sequence != 0)
				continue;
			/*
			 * Fallback using MSR method.
			 */
			break;
		}
	}
	rdmsrl(HV_X64_MSR_TIME_REF_COUNT, current_tick);
	return current_tick;
}

static struct clocksource hyperv_cs_tsc = {
	.name = "hyperv_clocksource_tsc_page",
	.rating = 425,
	.read = read_hv_clock_tsc,
	.mask = CLOCKSOURCE_MASK(64),
	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
#endif
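/*
 * Note on read_hv_clock_tsc() above: the while loop is effectively a
 * seqcount read - sample tsc_sequence, compute
 * ((cur_tsc * tsc_scale) >> 64) + tsc_offset with the 128-bit mulq,
 * then retry if the hypervisor bumped tsc_sequence meanwhile. A
 * sequence of 0 marks the page invalid, in which case the slower
 * HV_X64_MSR_TIME_REF_COUNT read is used instead.
 */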

/*
 * hv_init - Main initialization routine.
 *
@@ -191,129 +49,14 @@ static struct clocksource hyperv_cs_tsc = {
 */
int hv_init(void)
{
	int max_leaf;
	union hv_x64_msr_hypercall_contents hypercall_msr;
	void *virtaddr = NULL;
	if (!hv_is_hypercall_page_setup())
		return -ENOTSUPP;

	memset(hv_context.synic_event_page, 0, sizeof(void *) * NR_CPUS);
	memset(hv_context.synic_message_page, 0,
	       sizeof(void *) * NR_CPUS);
	memset(hv_context.post_msg_page, 0,
	       sizeof(void *) * NR_CPUS);
	memset(hv_context.vp_index, 0,
	       sizeof(int) * NR_CPUS);
	memset(hv_context.event_dpc, 0,
	       sizeof(void *) * NR_CPUS);
	memset(hv_context.msg_dpc, 0,
	       sizeof(void *) * NR_CPUS);
	memset(hv_context.clk_evt, 0,
	       sizeof(void *) * NR_CPUS);
	hv_context.cpu_context = alloc_percpu(struct hv_per_cpu_context);
	if (!hv_context.cpu_context)
		return -ENOMEM;

	max_leaf = query_hypervisor_info();

	/*
	 * Write our OS ID.
	 */
	hv_context.guestid = generate_guest_id(0, LINUX_VERSION_CODE, 0);
	wrmsrl(HV_X64_MSR_GUEST_OS_ID, hv_context.guestid);

	/* See if the hypercall page is already set */
	rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);

	virtaddr = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL_EXEC);

	if (!virtaddr)
		goto cleanup;

	hypercall_msr.enable = 1;

	hypercall_msr.guest_physical_address = vmalloc_to_pfn(virtaddr);
	wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);

	/* Confirm that hypercall page did get setup. */
	hypercall_msr.as_uint64 = 0;
	rdmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);

	if (!hypercall_msr.enable)
		goto cleanup;

	hv_context.hypercall_page = virtaddr;

#ifdef CONFIG_X86_64
	if (ms_hyperv.features & HV_X64_MSR_REFERENCE_TSC_AVAILABLE) {
		union hv_x64_msr_hypercall_contents tsc_msr;
		void *va_tsc;

		va_tsc = __vmalloc(PAGE_SIZE, GFP_KERNEL, PAGE_KERNEL);
		if (!va_tsc)
			goto cleanup;
		hv_context.tsc_page = va_tsc;

		rdmsrl(HV_X64_MSR_REFERENCE_TSC, tsc_msr.as_uint64);

		tsc_msr.enable = 1;
		tsc_msr.guest_physical_address = vmalloc_to_pfn(va_tsc);

		wrmsrl(HV_X64_MSR_REFERENCE_TSC, tsc_msr.as_uint64);
		clocksource_register_hz(&hyperv_cs_tsc, NSEC_PER_SEC/100);
	}
#endif
	return 0;

cleanup:
	if (virtaddr) {
		if (hypercall_msr.enable) {
			hypercall_msr.as_uint64 = 0;
			wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
		}

		vfree(virtaddr);
	}

	return -ENOTSUPP;
}
/*
 * hv_cleanup - Cleanup routine.
 *
 * This routine is called normally during driver unloading or exiting.
 */
void hv_cleanup(bool crash)
{
	union hv_x64_msr_hypercall_contents hypercall_msr;

	/* Reset our OS id */
	wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0);

	if (hv_context.hypercall_page) {
		hypercall_msr.as_uint64 = 0;
		wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
		if (!crash)
			vfree(hv_context.hypercall_page);
		hv_context.hypercall_page = NULL;
	}

#ifdef CONFIG_X86_64
	/*
	 * Cleanup the TSC page based CS.
	 */
	if (ms_hyperv.features & HV_X64_MSR_REFERENCE_TSC_AVAILABLE) {
		/*
		 * Crash can happen in an interrupt context and unregistering
		 * a clocksource is impossible and redundant in this case.
		 */
		if (!oops_in_progress) {
			clocksource_change_rating(&hyperv_cs_tsc, 10);
			clocksource_unregister(&hyperv_cs_tsc);
		}

		hypercall_msr.as_uint64 = 0;
		wrmsrl(HV_X64_MSR_REFERENCE_TSC, hypercall_msr.as_uint64);
		if (!crash)
			vfree(hv_context.tsc_page);
		hv_context.tsc_page = NULL;
	}
#endif
}
/*
@@ -325,25 +68,24 @@ int hv_post_message(union hv_connection_id connection_id,
			enum hv_message_type message_type,
			void *payload, size_t payload_size)
{

	struct hv_input_post_message *aligned_msg;
	struct hv_per_cpu_context *hv_cpu;
	u64 status;

	if (payload_size > HV_MESSAGE_PAYLOAD_BYTE_COUNT)
		return -EMSGSIZE;

	aligned_msg = (struct hv_input_post_message *)
			hv_context.post_msg_page[get_cpu()];

	hv_cpu = get_cpu_ptr(hv_context.cpu_context);
	aligned_msg = hv_cpu->post_msg_page;
	aligned_msg->connectionid = connection_id;
	aligned_msg->reserved = 0;
	aligned_msg->message_type = message_type;
	aligned_msg->payload_size = payload_size;
	memcpy((void *)aligned_msg->payload, payload, payload_size);
	put_cpu_ptr(hv_cpu);

	status = hv_do_hypercall(HVCALL_POST_MESSAGE, aligned_msg, NULL);

	put_cpu();
	return status & 0xFFFF;
}
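/*
 * Note on the rework above (a reading of the diff, not authoritative):
 * get_cpu_ptr()/put_cpu_ptr() pin the task only while this CPU's
 * post_msg_page is being filled, whereas the old get_cpu()/put_cpu()
 * pair also kept preemption disabled across the hypercall itself.
 */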
@@ -354,16 +96,16 @@ static int hv_ce_set_next_event(unsigned long delta,

	WARN_ON(!clockevent_state_oneshot(evt));

	rdmsrl(HV_X64_MSR_TIME_REF_COUNT, current_tick);
	hv_get_current_tick(current_tick);
	current_tick += delta;
	wrmsrl(HV_X64_MSR_STIMER0_COUNT, current_tick);
	hv_init_timer(HV_X64_MSR_STIMER0_COUNT, current_tick);
	return 0;
}

static int hv_ce_shutdown(struct clock_event_device *evt)
{
	wrmsrl(HV_X64_MSR_STIMER0_COUNT, 0);
	wrmsrl(HV_X64_MSR_STIMER0_CONFIG, 0);
	hv_init_timer(HV_X64_MSR_STIMER0_COUNT, 0);
	hv_init_timer_config(HV_X64_MSR_STIMER0_CONFIG, 0);

	return 0;
}
@@ -375,7 +117,7 @@ static int hv_ce_set_oneshot(struct clock_event_device *evt)
	timer_cfg.enable = 1;
	timer_cfg.auto_enable = 1;
	timer_cfg.sintx = VMBUS_MESSAGE_SINT;
	wrmsrl(HV_X64_MSR_STIMER0_CONFIG, timer_cfg.as_uint64);
	hv_init_timer_config(HV_X64_MSR_STIMER0_CONFIG, timer_cfg.as_uint64);

	return 0;
}
@@ -400,8 +142,6 @@ static void hv_init_clockevent_device(struct clock_event_device *dev, int cpu)

int hv_synic_alloc(void)
{
	size_t size = sizeof(struct tasklet_struct);
	size_t ced_size = sizeof(struct clock_event_device);
	int cpu;

	hv_context.hv_numa_map = kzalloc(sizeof(struct cpumask) * nr_node_ids,
@@ -411,52 +151,42 @@ int hv_synic_alloc(void)
		goto err;
	}

	for_each_online_cpu(cpu) {
		hv_context.event_dpc[cpu] = kmalloc(size, GFP_ATOMIC);
		if (hv_context.event_dpc[cpu] == NULL) {
			pr_err("Unable to allocate event dpc\n");
			goto err;
		}
		tasklet_init(hv_context.event_dpc[cpu], vmbus_on_event, cpu);
	for_each_present_cpu(cpu) {
		struct hv_per_cpu_context *hv_cpu
			= per_cpu_ptr(hv_context.cpu_context, cpu);

		hv_context.msg_dpc[cpu] = kmalloc(size, GFP_ATOMIC);
		if (hv_context.msg_dpc[cpu] == NULL) {
			pr_err("Unable to allocate event dpc\n");
			goto err;
		}
		tasklet_init(hv_context.msg_dpc[cpu], vmbus_on_msg_dpc, cpu);
		memset(hv_cpu, 0, sizeof(*hv_cpu));
		tasklet_init(&hv_cpu->msg_dpc,
			     vmbus_on_msg_dpc, (unsigned long) hv_cpu);

		hv_context.clk_evt[cpu] = kzalloc(ced_size, GFP_ATOMIC);
		if (hv_context.clk_evt[cpu] == NULL) {
		hv_cpu->clk_evt = kzalloc(sizeof(struct clock_event_device),
					  GFP_KERNEL);
		if (hv_cpu->clk_evt == NULL) {
			pr_err("Unable to allocate clock event device\n");
			goto err;
		}
		hv_init_clockevent_device(hv_cpu->clk_evt, cpu);

		hv_init_clockevent_device(hv_context.clk_evt[cpu], cpu);

		hv_context.synic_message_page[cpu] =
		hv_cpu->synic_message_page =
			(void *)get_zeroed_page(GFP_ATOMIC);

		if (hv_context.synic_message_page[cpu] == NULL) {
		if (hv_cpu->synic_message_page == NULL) {
			pr_err("Unable to allocate SYNIC message page\n");
			goto err;
		}

		hv_context.synic_event_page[cpu] =
			(void *)get_zeroed_page(GFP_ATOMIC);

		if (hv_context.synic_event_page[cpu] == NULL) {
		hv_cpu->synic_event_page = (void *)get_zeroed_page(GFP_ATOMIC);
		if (hv_cpu->synic_event_page == NULL) {
			pr_err("Unable to allocate SYNIC event page\n");
			goto err;
		}

		hv_context.post_msg_page[cpu] =
			(void *)get_zeroed_page(GFP_ATOMIC);

		if (hv_context.post_msg_page[cpu] == NULL) {
		hv_cpu->post_msg_page = (void *)get_zeroed_page(GFP_ATOMIC);
		if (hv_cpu->post_msg_page == NULL) {
			pr_err("Unable to allocate post msg page\n");
			goto err;
		}

		INIT_LIST_HEAD(&hv_cpu->chan_list);
	}

	return 0;
@@ -464,26 +194,24 @@ err:
	return -ENOMEM;
}

static void hv_synic_free_cpu(int cpu)
{
	kfree(hv_context.event_dpc[cpu]);
	kfree(hv_context.msg_dpc[cpu]);
	kfree(hv_context.clk_evt[cpu]);
	if (hv_context.synic_event_page[cpu])
		free_page((unsigned long)hv_context.synic_event_page[cpu]);
	if (hv_context.synic_message_page[cpu])
		free_page((unsigned long)hv_context.synic_message_page[cpu]);
	if (hv_context.post_msg_page[cpu])
		free_page((unsigned long)hv_context.post_msg_page[cpu]);
}

void hv_synic_free(void)
{
	int cpu;

	for_each_present_cpu(cpu) {
		struct hv_per_cpu_context *hv_cpu
			= per_cpu_ptr(hv_context.cpu_context, cpu);

		if (hv_cpu->synic_event_page)
			free_page((unsigned long)hv_cpu->synic_event_page);
		if (hv_cpu->synic_message_page)
			free_page((unsigned long)hv_cpu->synic_message_page);
		if (hv_cpu->post_msg_page)
			free_page((unsigned long)hv_cpu->post_msg_page);
	}

	kfree(hv_context.hv_numa_map);
	for_each_online_cpu(cpu)
		hv_synic_free_cpu(cpu);
}
/*
@@ -493,54 +221,49 @@ void hv_synic_free(void)
 * retrieve the initialized message and event pages. Otherwise, we create and
 * initialize the message and event pages.
 */
void hv_synic_init(void *arg)
int hv_synic_init(unsigned int cpu)
{
	u64 version;
	struct hv_per_cpu_context *hv_cpu
		= per_cpu_ptr(hv_context.cpu_context, cpu);
	union hv_synic_simp simp;
	union hv_synic_siefp siefp;
	union hv_synic_sint shared_sint;
	union hv_synic_scontrol sctrl;
	u64 vp_index;

	int cpu = smp_processor_id();

	if (!hv_context.hypercall_page)
		return;

	/* Check the version */
	rdmsrl(HV_X64_MSR_SVERSION, version);

	/* Setup the Synic's message page */
	rdmsrl(HV_X64_MSR_SIMP, simp.as_uint64);
	hv_get_simp(simp.as_uint64);
	simp.simp_enabled = 1;
	simp.base_simp_gpa = virt_to_phys(hv_context.synic_message_page[cpu])
	simp.base_simp_gpa = virt_to_phys(hv_cpu->synic_message_page)
		>> PAGE_SHIFT;

	wrmsrl(HV_X64_MSR_SIMP, simp.as_uint64);
	hv_set_simp(simp.as_uint64);

	/* Setup the Synic's event page */
	rdmsrl(HV_X64_MSR_SIEFP, siefp.as_uint64);
	hv_get_siefp(siefp.as_uint64);
	siefp.siefp_enabled = 1;
	siefp.base_siefp_gpa = virt_to_phys(hv_context.synic_event_page[cpu])
	siefp.base_siefp_gpa = virt_to_phys(hv_cpu->synic_event_page)
		>> PAGE_SHIFT;

	wrmsrl(HV_X64_MSR_SIEFP, siefp.as_uint64);
	hv_set_siefp(siefp.as_uint64);

	/* Setup the shared SINT. */
	rdmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
	hv_get_synint_state(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT,
			    shared_sint.as_uint64);

	shared_sint.as_uint64 = 0;
	shared_sint.vector = HYPERVISOR_CALLBACK_VECTOR;
	shared_sint.masked = false;
	shared_sint.auto_eoi = true;

	wrmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
	hv_set_synint_state(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT,
			    shared_sint.as_uint64);

	/* Enable the global synic bit */
	rdmsrl(HV_X64_MSR_SCONTROL, sctrl.as_uint64);
	hv_get_synic_state(sctrl.as_uint64);
	sctrl.enable = 1;

	wrmsrl(HV_X64_MSR_SCONTROL, sctrl.as_uint64);
	hv_set_synic_state(sctrl.as_uint64);

	hv_context.synic_initialized = true;

@@ -549,20 +272,18 @@ void hv_synic_init(void *arg)
	 * of cpuid and Linux' notion of cpuid.
	 * This array will be indexed using Linux cpuid.
	 */
	rdmsrl(HV_X64_MSR_VP_INDEX, vp_index);
	hv_get_vp_index(vp_index);
	hv_context.vp_index[cpu] = (u32)vp_index;

	INIT_LIST_HEAD(&hv_context.percpu_list[cpu]);

	/*
	 * Register the per-cpu clockevent source.
	 */
	if (ms_hyperv.features & HV_X64_MSR_SYNTIMER_AVAILABLE)
		clockevents_config_and_register(hv_context.clk_evt[cpu],
		clockevents_config_and_register(hv_cpu->clk_evt,
						HV_TIMER_FREQUENCY,
						HV_MIN_DELTA_TICKS,
						HV_MAX_MAX_DELTA_TICKS);
	return;
	return 0;
}

/*
@@ -575,52 +296,94 @@ void hv_synic_clockevents_cleanup(void)
	if (!(ms_hyperv.features & HV_X64_MSR_SYNTIMER_AVAILABLE))
		return;

	for_each_present_cpu(cpu)
		clockevents_unbind_device(hv_context.clk_evt[cpu], cpu);
	for_each_present_cpu(cpu) {
		struct hv_per_cpu_context *hv_cpu
			= per_cpu_ptr(hv_context.cpu_context, cpu);

		clockevents_unbind_device(hv_cpu->clk_evt, cpu);
	}
}

/*
 * hv_synic_cleanup - Cleanup routine for hv_synic_init().
 */
void hv_synic_cleanup(void *arg)
int hv_synic_cleanup(unsigned int cpu)
{
	union hv_synic_sint shared_sint;
	union hv_synic_simp simp;
	union hv_synic_siefp siefp;
	union hv_synic_scontrol sctrl;
	int cpu = smp_processor_id();
	struct vmbus_channel *channel, *sc;
	bool channel_found = false;
	unsigned long flags;

	if (!hv_context.synic_initialized)
		return;
		return -EFAULT;

	/*
	 * Search for channels which are bound to the CPU we're about to
	 * cleanup. In case we find one and vmbus is still connected we need to
	 * fail, this will effectively prevent CPU offlining. There is no way
	 * we can re-bind channels to different CPUs for now.
	 */
	mutex_lock(&vmbus_connection.channel_mutex);
	list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
		if (channel->target_cpu == cpu) {
			channel_found = true;
			break;
		}
		spin_lock_irqsave(&channel->lock, flags);
		list_for_each_entry(sc, &channel->sc_list, sc_list) {
			if (sc->target_cpu == cpu) {
				channel_found = true;
				break;
			}
		}
		spin_unlock_irqrestore(&channel->lock, flags);
		if (channel_found)
			break;
	}
	mutex_unlock(&vmbus_connection.channel_mutex);

	if (channel_found && vmbus_connection.conn_state == CONNECTED)
		return -EBUSY;

	/* Turn off clockevent device */
	if (ms_hyperv.features & HV_X64_MSR_SYNTIMER_AVAILABLE) {
		clockevents_unbind_device(hv_context.clk_evt[cpu], cpu);
		hv_ce_shutdown(hv_context.clk_evt[cpu]);
		struct hv_per_cpu_context *hv_cpu
			= this_cpu_ptr(hv_context.cpu_context);

		clockevents_unbind_device(hv_cpu->clk_evt, cpu);
		hv_ce_shutdown(hv_cpu->clk_evt);
		put_cpu_ptr(hv_cpu);
	}

	rdmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
	hv_get_synint_state(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT,
			    shared_sint.as_uint64);

	shared_sint.masked = 1;

	/* Need to correctly cleanup in the case of SMP!!! */
	/* Disable the interrupt */
	wrmsrl(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
	hv_set_synint_state(HV_X64_MSR_SINT0 + VMBUS_MESSAGE_SINT,
			    shared_sint.as_uint64);

	rdmsrl(HV_X64_MSR_SIMP, simp.as_uint64);
	hv_get_simp(simp.as_uint64);
	simp.simp_enabled = 0;
	simp.base_simp_gpa = 0;

	wrmsrl(HV_X64_MSR_SIMP, simp.as_uint64);
	hv_set_simp(simp.as_uint64);

	rdmsrl(HV_X64_MSR_SIEFP, siefp.as_uint64);
	hv_get_siefp(siefp.as_uint64);
	siefp.siefp_enabled = 0;
	siefp.base_siefp_gpa = 0;

	wrmsrl(HV_X64_MSR_SIEFP, siefp.as_uint64);
	hv_set_siefp(siefp.as_uint64);

	/* Disable the global synic bit */
	rdmsrl(HV_X64_MSR_SCONTROL, sctrl.as_uint64);
	hv_get_synic_state(sctrl.as_uint64);
	sctrl.enable = 0;
	wrmsrl(HV_X64_MSR_SCONTROL, sctrl.as_uint64);
	hv_set_synic_state(sctrl.as_uint64);

	return 0;
}
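/*
 * Context, inferred from the new (unsigned int cpu) signatures above:
 * hv_synic_init()/hv_synic_cleanup() now look like CPU hotplug (cpuhp)
 * callbacks, so returning -EBUSY from the teardown path when a channel
 * is still bound to the CPU aborts the offline attempt instead of
 * silently losing that channel's interrupts.
 */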
drivers/hv/hv_balloon.c

@@ -587,6 +587,7 @@ static int hv_memory_notifier(struct notifier_block *nb, unsigned long val,
		spin_lock_irqsave(&dm_device.ha_lock, flags);
		dm_device.num_pages_onlined += mem->nr_pages;
		spin_unlock_irqrestore(&dm_device.ha_lock, flags);
		/* Fall through */
	case MEM_CANCEL_ONLINE:
		if (dm_device.ha_waiting) {
			dm_device.ha_waiting = false;
drivers/hv/hv_fcopy.c

@@ -31,6 +31,16 @@
#define WIN8_SRV_MINOR 1
#define WIN8_SRV_VERSION (WIN8_SRV_MAJOR << 16 | WIN8_SRV_MINOR)

#define FCOPY_VER_COUNT 1
static const int fcopy_versions[] = {
	WIN8_SRV_VERSION
};

#define FW_VER_COUNT 1
static const int fw_versions[] = {
	UTIL_FW_VERSION
};

/*
 * Global state maintained for transaction that is being processed.
 * For a class of integration services, including the "file copy service",
@@ -61,6 +71,7 @@ static DECLARE_WORK(fcopy_send_work, fcopy_send_data);
static const char fcopy_devname[] = "vmbus/hv_fcopy";
static u8 *recv_buffer;
static struct hvutil_transport *hvt;
static struct completion release_event;
/*
 * This state maintains the version number registered by the daemon.
 */
@@ -227,8 +238,6 @@ void hv_fcopy_onchannelcallback(void *context)
	u64 requestid;
	struct hv_fcopy_hdr *fcopy_msg;
	struct icmsg_hdr *icmsghdr;
	struct icmsg_negotiate *negop = NULL;
	int util_fw_version;
	int fcopy_srv_version;

	if (fcopy_transaction.state > HVUTIL_READY)
@@ -242,10 +251,15 @@ void hv_fcopy_onchannelcallback(void *context)
	icmsghdr = (struct icmsg_hdr *)&recv_buffer[
			sizeof(struct vmbuspipe_hdr)];
	if (icmsghdr->icmsgtype == ICMSGTYPE_NEGOTIATE) {
		util_fw_version = UTIL_FW_VERSION;
		fcopy_srv_version = WIN8_SRV_VERSION;
		vmbus_prep_negotiate_resp(icmsghdr, negop, recv_buffer,
				util_fw_version, fcopy_srv_version);
		if (vmbus_prep_negotiate_resp(icmsghdr, recv_buffer,
				fw_versions, FW_VER_COUNT,
				fcopy_versions, FCOPY_VER_COUNT,
				NULL, &fcopy_srv_version)) {

			pr_info("FCopy IC version %d.%d\n",
				fcopy_srv_version >> 16,
				fcopy_srv_version & 0xFFFF);
		}
	} else {
		fcopy_msg = (struct hv_fcopy_hdr *)&recv_buffer[
				sizeof(struct vmbuspipe_hdr) +
@@ -317,6 +331,7 @@ static void fcopy_on_reset(void)

	if (cancel_delayed_work_sync(&fcopy_timeout_work))
		fcopy_respond_to_host(HV_E_FAIL);
	complete(&release_event);
}

int hv_fcopy_init(struct hv_util_service *srv)
@@ -324,6 +339,7 @@ int hv_fcopy_init(struct hv_util_service *srv)
	recv_buffer = srv->recv_buffer;
	fcopy_transaction.recv_channel = srv->channel;

	init_completion(&release_event);
	/*
	 * When this driver loads, the user level daemon that
	 * processes the host requests may not yet be running.
@@ -345,4 +361,5 @@ void hv_fcopy_deinit(void)
	fcopy_transaction.state = HVUTIL_DEVICE_DYING;
	cancel_delayed_work_sync(&fcopy_timeout_work);
	hvutil_transport_destroy(hvt);
	wait_for_completion(&release_event);
}
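/*
 * Pattern sketch (applies equally to the kvp, vss and util conversions
 * below): instead of passing one pre-selected framework/service version
 * pair, callers now hand vmbus_prep_negotiate_resp() arrays of
 * acceptable versions, apparently ordered newest-first, and check its
 * return value, which reports whether negotiation succeeded and fills
 * in the version that was agreed on.
 */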
drivers/hv/hv_kvp.c

@@ -46,6 +46,19 @@
#define WIN8_SRV_MINOR 0
#define WIN8_SRV_VERSION (WIN8_SRV_MAJOR << 16 | WIN8_SRV_MINOR)

#define KVP_VER_COUNT 3
static const int kvp_versions[] = {
	WIN8_SRV_VERSION,
	WIN7_SRV_VERSION,
	WS2008_SRV_VERSION
};

#define FW_VER_COUNT 2
static const int fw_versions[] = {
	UTIL_FW_VERSION,
	UTIL_WS2K8_FW_VERSION
};

/*
 * Global state maintained for transaction that is being processed. For a class
 * of integration services, including the "KVP service", the specified protocol
@@ -88,6 +101,7 @@ static DECLARE_WORK(kvp_sendkey_work, kvp_send_key);
static const char kvp_devname[] = "vmbus/hv_kvp";
static u8 *recv_buffer;
static struct hvutil_transport *hvt;
static struct completion release_event;
/*
 * Register the kernel component with the user-level daemon.
 * As part of this registration, pass the LIC version number.
@@ -609,8 +623,6 @@ void hv_kvp_onchannelcallback(void *context)
	struct hv_kvp_msg *kvp_msg;

	struct icmsg_hdr *icmsghdrp;
	struct icmsg_negotiate *negop = NULL;
	int util_fw_version;
	int kvp_srv_version;
	static enum {NEGO_NOT_STARTED,
		     NEGO_IN_PROGRESS,
@@ -639,28 +651,14 @@ void hv_kvp_onchannelcallback(void *context)
			sizeof(struct vmbuspipe_hdr)];

		if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
			/*
			 * Based on the host, select appropriate
			 * framework and service versions we will
			 * negotiate.
			 */
			switch (vmbus_proto_version) {
			case (VERSION_WS2008):
				util_fw_version = UTIL_WS2K8_FW_VERSION;
				kvp_srv_version = WS2008_SRV_VERSION;
				break;
			case (VERSION_WIN7):
				util_fw_version = UTIL_FW_VERSION;
				kvp_srv_version = WIN7_SRV_VERSION;
				break;
			default:
				util_fw_version = UTIL_FW_VERSION;
				kvp_srv_version = WIN8_SRV_VERSION;
			if (vmbus_prep_negotiate_resp(icmsghdrp,
				 recv_buffer, fw_versions, FW_VER_COUNT,
				 kvp_versions, KVP_VER_COUNT,
				 NULL, &kvp_srv_version)) {
				pr_info("KVP IC version %d.%d\n",
					kvp_srv_version >> 16,
					kvp_srv_version & 0xFFFF);
			}
			vmbus_prep_negotiate_resp(icmsghdrp, negop,
				 recv_buffer, util_fw_version,
				 kvp_srv_version);

		} else {
			kvp_msg = (struct hv_kvp_msg *)&recv_buffer[
				sizeof(struct vmbuspipe_hdr) +
@@ -716,6 +714,7 @@ static void kvp_on_reset(void)
	if (cancel_delayed_work_sync(&kvp_timeout_work))
		kvp_respond_to_host(NULL, HV_E_FAIL);
	kvp_transaction.state = HVUTIL_DEVICE_INIT;
	complete(&release_event);
}

int
@@ -724,6 +723,7 @@ hv_kvp_init(struct hv_util_service *srv)
	recv_buffer = srv->recv_buffer;
	kvp_transaction.recv_channel = srv->channel;

	init_completion(&release_event);
	/*
	 * When this driver loads, the user level daemon that
	 * processes the host requests may not yet be running.
@@ -747,4 +747,5 @@ void hv_kvp_deinit(void)
	cancel_delayed_work_sync(&kvp_timeout_work);
	cancel_work_sync(&kvp_sendkey_work);
	hvutil_transport_destroy(hvt);
	wait_for_completion(&release_event);
}
drivers/hv/hv_snapshot.c

@@ -31,6 +31,16 @@
#define VSS_MINOR 0
#define VSS_VERSION (VSS_MAJOR << 16 | VSS_MINOR)

#define VSS_VER_COUNT 1
static const int vss_versions[] = {
	VSS_VERSION
};

#define FW_VER_COUNT 1
static const int fw_versions[] = {
	UTIL_FW_VERSION
};

/*
 * Timeout values are based on expecations from host
 */
@@ -69,6 +79,7 @@ static int dm_reg_value;
static const char vss_devname[] = "vmbus/hv_vss";
static __u8 *recv_buffer;
static struct hvutil_transport *hvt;
static struct completion release_event;

static void vss_timeout_func(struct work_struct *dummy);
static void vss_handle_request(struct work_struct *dummy);
@@ -293,10 +304,9 @@ void hv_vss_onchannelcallback(void *context)
	u32 recvlen;
	u64 requestid;
	struct hv_vss_msg *vss_msg;

	int vss_srv_version;

	struct icmsg_hdr *icmsghdrp;
	struct icmsg_negotiate *negop = NULL;

	if (vss_transaction.state > HVUTIL_READY)
		return;
@@ -309,9 +319,15 @@ void hv_vss_onchannelcallback(void *context)
			sizeof(struct vmbuspipe_hdr)];

		if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
			vmbus_prep_negotiate_resp(icmsghdrp, negop,
				 recv_buffer, UTIL_FW_VERSION,
				 VSS_VERSION);
			if (vmbus_prep_negotiate_resp(icmsghdrp,
				 recv_buffer, fw_versions, FW_VER_COUNT,
				 vss_versions, VSS_VER_COUNT,
				 NULL, &vss_srv_version)) {

				pr_info("VSS IC version %d.%d\n",
					vss_srv_version >> 16,
					vss_srv_version & 0xFFFF);
			}
		} else {
			vss_msg = (struct hv_vss_msg *)&recv_buffer[
				sizeof(struct vmbuspipe_hdr) +
@@ -345,11 +361,13 @@ static void vss_on_reset(void)
	if (cancel_delayed_work_sync(&vss_timeout_work))
		vss_respond_to_host(HV_E_FAIL);
	vss_transaction.state = HVUTIL_DEVICE_INIT;
	complete(&release_event);
}

int
hv_vss_init(struct hv_util_service *srv)
{
	init_completion(&release_event);
	if (vmbus_proto_version < VERSION_WIN8_1) {
		pr_warn("Integration service 'Backup (volume snapshot)'"
			" not supported on this host version.\n");
@@ -382,4 +400,5 @@ void hv_vss_deinit(void)
	cancel_delayed_work_sync(&vss_timeout_work);
	cancel_work_sync(&vss_handle_request_work);
	hvutil_transport_destroy(hvt);
	wait_for_completion(&release_event);
}
drivers/hv/hv_util.c

@@ -27,6 +27,9 @@
#include <linux/sysctl.h>
#include <linux/reboot.h>
#include <linux/hyperv.h>
#include <linux/clockchips.h>
#include <linux/ptp_clock_kernel.h>
#include <asm/mshyperv.h>

#include "hyperv_vmbus.h"

@@ -57,7 +60,31 @@
static int sd_srv_version;
static int ts_srv_version;
static int hb_srv_version;
static int util_fw_version;

#define SD_VER_COUNT 2
static const int sd_versions[] = {
	SD_VERSION,
	SD_VERSION_1
};

#define TS_VER_COUNT 3
static const int ts_versions[] = {
	TS_VERSION,
	TS_VERSION_3,
	TS_VERSION_1
};

#define HB_VER_COUNT 2
static const int hb_versions[] = {
	HB_VERSION,
	HB_VERSION_1
};

#define FW_VER_COUNT 2
static const int fw_versions[] = {
	UTIL_FW_VERSION,
	UTIL_WS2K8_FW_VERSION
};

static void shutdown_onchannelcallback(void *context);
static struct hv_util_service util_shutdown = {
@@ -118,7 +145,6 @@ static void shutdown_onchannelcallback(void *context)
	struct shutdown_msg_data *shutdown_msg;

	struct icmsg_hdr *icmsghdrp;
	struct icmsg_negotiate *negop = NULL;

	vmbus_recvpacket(channel, shut_txf_buf,
			 PAGE_SIZE, &recvlen, &requestid);
@@ -128,9 +154,14 @@ static void shutdown_onchannelcallback(void *context)
			sizeof(struct vmbuspipe_hdr)];

		if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
			vmbus_prep_negotiate_resp(icmsghdrp, negop,
					shut_txf_buf, util_fw_version,
					sd_srv_version);
			if (vmbus_prep_negotiate_resp(icmsghdrp, shut_txf_buf,
					fw_versions, FW_VER_COUNT,
					sd_versions, SD_VER_COUNT,
					NULL, &sd_srv_version)) {
				pr_info("Shutdown IC version %d.%d\n",
					sd_srv_version >> 16,
					sd_srv_version & 0xFFFF);
			}
		} else {
			shutdown_msg =
				(struct shutdown_msg_data *)&shut_txf_buf[
@@ -181,31 +212,17 @@ struct adj_time_work {

static void hv_set_host_time(struct work_struct *work)
{
	struct adj_time_work *wrk;
	s64 host_tns;
	u64 newtime;
	struct timespec host_ts;
	struct adj_time_work *wrk;
	struct timespec64 host_ts;
	u64 reftime, newtime;

	wrk = container_of(work, struct adj_time_work, work);

	newtime = wrk->host_time;
	if (ts_srv_version > TS_VERSION_3) {
		/*
		 * Some latency has been introduced since Hyper-V generated
		 * its time sample. Take that latency into account before
		 * using TSC reference time sample from Hyper-V.
		 *
		 * This sample is given by TimeSync v4 and above hosts.
		 */
		u64 current_tick;
	reftime = hyperv_cs->read(hyperv_cs);
	newtime = wrk->host_time + (reftime - wrk->ref_time);
	host_ts = ns_to_timespec64((newtime - WLTIMEDELTA) * 100);

		rdmsrl(HV_X64_MSR_TIME_REF_COUNT, current_tick);
		newtime += (current_tick - wrk->ref_time);
	}
	host_tns = (newtime - WLTIMEDELTA) * 100;
	host_ts = ns_to_timespec(host_tns);

	do_settimeofday(&host_ts);
	do_settimeofday64(&host_ts);
}
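/*
 * Unit sketch for the conversion above: Hyper-V time samples count
 * 100 ns units since the Windows epoch (1601-01-01), so subtracting
 * WLTIMEDELTA (the 1601-to-1970 offset in the same units) and
 * multiplying by 100 yields nanoseconds since the Unix epoch, ready
 * for ns_to_timespec64() and do_settimeofday64().
 */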
/*
@@ -222,22 +239,60 @@ static void hv_set_host_time(struct work_struct *work)
 * to discipline the clock.
 */
static struct adj_time_work wrk;
static inline void adj_guesttime(u64 hosttime, u64 reftime, u8 flags)

/*
 * The last time sample, received from the host. PTP device responds to
 * requests by using this data and the current partition-wide time reference
 * count.
 */
static struct {
	u64 host_time;
	u64 ref_time;
	struct system_time_snapshot snap;
	spinlock_t lock;
} host_ts;

static inline void adj_guesttime(u64 hosttime, u64 reftime, u8 adj_flags)
{
	unsigned long flags;
	u64 cur_reftime;

	/*
	 * This check is safe since we are executing in the
	 * interrupt context and time synch messages arre always
	 * interrupt context and time synch messages are always
	 * delivered on the same CPU.
	 */
	if (work_pending(&wrk.work))
		return;
	if (adj_flags & ICTIMESYNCFLAG_SYNC) {
		/* Queue a job to do do_settimeofday64() */
		if (work_pending(&wrk.work))
			return;

	wrk.host_time = hosttime;
	wrk.ref_time = reftime;
	wrk.flags = flags;
	if ((flags & (ICTIMESYNCFLAG_SYNC | ICTIMESYNCFLAG_SAMPLE)) != 0) {
		wrk.host_time = hosttime;
		wrk.ref_time = reftime;
		wrk.flags = adj_flags;
		schedule_work(&wrk.work);
	} else {
		/*
		 * Save the adjusted time sample from the host and the snapshot
		 * of the current system time for PTP device.
		 */
		spin_lock_irqsave(&host_ts.lock, flags);

		cur_reftime = hyperv_cs->read(hyperv_cs);
		host_ts.host_time = hosttime;
		host_ts.ref_time = cur_reftime;
		ktime_get_snapshot(&host_ts.snap);

		/*
		 * TimeSync v4 messages contain reference time (guest's Hyper-V
		 * clocksource read when the time sample was generated), we can
		 * improve the precision by adding the delta between now and the
		 * time of generation.
		 */
		if (ts_srv_version > TS_VERSION_3)
			host_ts.host_time += (cur_reftime - reftime);

		spin_unlock_irqrestore(&host_ts.lock, flags);
	}
}
@@ -253,7 +308,6 @@ static void timesync_onchannelcallback(void *context)
	struct ictimesync_data *timedatap;
	struct ictimesync_ref_data *refdata;
	u8 *time_txf_buf = util_timesynch.recv_buffer;
	struct icmsg_negotiate *negop = NULL;

	vmbus_recvpacket(channel, time_txf_buf,
			 PAGE_SIZE, &recvlen, &requestid);
@@ -263,12 +317,14 @@ static void timesync_onchannelcallback(void *context)
			sizeof(struct vmbuspipe_hdr)];

		if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
			vmbus_prep_negotiate_resp(icmsghdrp, negop,
						time_txf_buf,
						util_fw_version,
						ts_srv_version);
			pr_info("Using TimeSync version %d.%d\n",
				ts_srv_version >> 16, ts_srv_version & 0xFFFF);
			if (vmbus_prep_negotiate_resp(icmsghdrp, time_txf_buf,
					fw_versions, FW_VER_COUNT,
					ts_versions, TS_VER_COUNT,
					NULL, &ts_srv_version)) {
				pr_info("TimeSync IC version %d.%d\n",
					ts_srv_version >> 16,
					ts_srv_version & 0xFFFF);
			}
		} else {
			if (ts_srv_version > TS_VERSION_3) {
				refdata = (struct ictimesync_ref_data *)
@@ -312,7 +368,6 @@ static void heartbeat_onchannelcallback(void *context)
	struct icmsg_hdr *icmsghdrp;
	struct heartbeat_msg_data *heartbeat_msg;
	u8 *hbeat_txf_buf = util_heartbeat.recv_buffer;
	struct icmsg_negotiate *negop = NULL;

	while (1) {

@@ -326,9 +381,16 @@ static void heartbeat_onchannelcallback(void *context)
				sizeof(struct vmbuspipe_hdr)];

		if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
			vmbus_prep_negotiate_resp(icmsghdrp, negop,
				hbeat_txf_buf, util_fw_version,
				hb_srv_version);
			if (vmbus_prep_negotiate_resp(icmsghdrp,
					hbeat_txf_buf,
					fw_versions, FW_VER_COUNT,
					hb_versions, HB_VER_COUNT,
					NULL, &hb_srv_version)) {

				pr_info("Heartbeat IC version %d.%d\n",
					hb_srv_version >> 16,
					hb_srv_version & 0xFFFF);
			}
		} else {
			heartbeat_msg =
				(struct heartbeat_msg_data *)&hbeat_txf_buf[
@@ -373,38 +435,10 @@ static int util_probe(struct hv_device *dev,
	 * Turn off batched reading for all util drivers before we open the
	 * channel.
	 */

	set_channel_read_state(dev->channel, false);
	set_channel_read_mode(dev->channel, HV_CALL_DIRECT);

	hv_set_drvdata(dev, srv);

	/*
	 * Based on the host; initialize the framework and
	 * service version numbers we will negotiate.
	 */
	switch (vmbus_proto_version) {
	case (VERSION_WS2008):
		util_fw_version = UTIL_WS2K8_FW_VERSION;
		sd_srv_version = SD_VERSION_1;
		ts_srv_version = TS_VERSION_1;
		hb_srv_version = HB_VERSION_1;
		break;
	case VERSION_WIN7:
	case VERSION_WIN8:
	case VERSION_WIN8_1:
		util_fw_version = UTIL_FW_VERSION;
		sd_srv_version = SD_VERSION;
		ts_srv_version = TS_VERSION_3;
		hb_srv_version = HB_VERSION;
		break;
	case VERSION_WIN10:
	default:
		util_fw_version = UTIL_FW_VERSION;
		sd_srv_version = SD_VERSION;
		ts_srv_version = TS_VERSION;
		hb_srv_version = HB_VERSION;
	}

	ret = vmbus_open(dev->channel, 4 * PAGE_SIZE, 4 * PAGE_SIZE, NULL, 0,
			 srv->util_cb, dev->channel);
	if (ret)
@@ -470,14 +504,113 @@ static struct hv_driver util_drv = {
	.remove = util_remove,
};
static int hv_ptp_enable(struct ptp_clock_info *info,
			 struct ptp_clock_request *request, int on)
{
	return -EOPNOTSUPP;
}

static int hv_ptp_settime(struct ptp_clock_info *p, const struct timespec64 *ts)
{
	return -EOPNOTSUPP;
}

static int hv_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
{
	return -EOPNOTSUPP;
}
static int hv_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	return -EOPNOTSUPP;
}

static int hv_ptp_gettime(struct ptp_clock_info *info, struct timespec64 *ts)
{
	unsigned long flags;
	u64 newtime, reftime;

	spin_lock_irqsave(&host_ts.lock, flags);
	reftime = hyperv_cs->read(hyperv_cs);
	newtime = host_ts.host_time + (reftime - host_ts.ref_time);
	*ts = ns_to_timespec64((newtime - WLTIMEDELTA) * 100);
	spin_unlock_irqrestore(&host_ts.lock, flags);

	return 0;
}

static int hv_ptp_get_syncdevicetime(ktime_t *device,
				     struct system_counterval_t *system,
				     void *ctx)
{
	system->cs = hyperv_cs;
	system->cycles = host_ts.ref_time;
	*device = ns_to_ktime((host_ts.host_time - WLTIMEDELTA) * 100);

	return 0;
}

static int hv_ptp_getcrosststamp(struct ptp_clock_info *ptp,
				 struct system_device_crosststamp *xtstamp)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&host_ts.lock, flags);

	/*
	 * host_ts contains the last time sample from the host and the snapshot
	 * of system time. We don't need to calculate the time delta between
	 * the reception and now as get_device_system_crosststamp() does the
	 * required interpolation.
	 */
	ret = get_device_system_crosststamp(hv_ptp_get_syncdevicetime,
					    NULL, &host_ts.snap, xtstamp);

	spin_unlock_irqrestore(&host_ts.lock, flags);

	return ret;
}

static struct ptp_clock_info ptp_hyperv_info = {
	.name = "hyperv",
	.enable = hv_ptp_enable,
	.adjtime = hv_ptp_adjtime,
	.adjfreq = hv_ptp_adjfreq,
	.gettime64 = hv_ptp_gettime,
	.getcrosststamp = hv_ptp_getcrosststamp,
	.settime64 = hv_ptp_settime,
	.owner = THIS_MODULE,
};

static struct ptp_clock *hv_ptp_clock;

static int hv_timesync_init(struct hv_util_service *srv)
{
	/* TimeSync requires Hyper-V clocksource. */
	if (!hyperv_cs)
		return -ENODEV;

	INIT_WORK(&wrk.work, hv_set_host_time);

	/*
	 * ptp_clock_register() returns NULL when CONFIG_PTP_1588_CLOCK is
	 * disabled but the driver is still useful without the PTP device
	 * as it still handles the ICTIMESYNCFLAG_SYNC case.
	 */
	hv_ptp_clock = ptp_clock_register(&ptp_hyperv_info, NULL);
	if (IS_ERR_OR_NULL(hv_ptp_clock)) {
		pr_err("cannot register PTP clock: %ld\n",
		       PTR_ERR(hv_ptp_clock));
		hv_ptp_clock = NULL;
	}

	return 0;
}

static void hv_timesync_deinit(void)
{
	if (hv_ptp_clock)
		ptp_clock_unregister(hv_ptp_clock);
	cancel_work_sync(&wrk.work);
}
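/*
 * Usage note (illustrative; the device node name is an assumption that
 * depends on probe order): with this PTP clock registered, userspace
 * can discipline the system clock from the host's time samples with
 * the linuxptp tools, e.g.
 *
 *	phc2sys -s /dev/ptp0 -O 0
 */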
drivers/hv/hyperv_vmbus.h

@@ -29,6 +29,7 @@
#include <asm/sync_bitops.h>
#include <linux/atomic.h>
#include <linux/hyperv.h>
#include <linux/interrupt.h>

/*
 * Timeout for services such as KVP and fcopy.
@@ -40,95 +41,9 @@
 */
#define HV_UTIL_NEGO_TIMEOUT 55

/*
 * The below CPUID leaves are present if VersionAndFeatures.HypervisorPresent
 * is set by CPUID(HVCPUID_VERSION_FEATURES).
 */
enum hv_cpuid_function {
	HVCPUID_VERSION_FEATURES = 0x00000001,
	HVCPUID_VENDOR_MAXFUNCTION = 0x40000000,
	HVCPUID_INTERFACE = 0x40000001,

	/*
	 * The remaining functions depend on the value of
	 * HVCPUID_INTERFACE
	 */
	HVCPUID_VERSION = 0x40000002,
	HVCPUID_FEATURES = 0x40000003,
	HVCPUID_ENLIGHTENMENT_INFO = 0x40000004,
	HVCPUID_IMPLEMENTATION_LIMITS = 0x40000005,
};

#define HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE 0x400

#define HV_X64_MSR_CRASH_P0 0x40000100
#define HV_X64_MSR_CRASH_P1 0x40000101
#define HV_X64_MSR_CRASH_P2 0x40000102
#define HV_X64_MSR_CRASH_P3 0x40000103
#define HV_X64_MSR_CRASH_P4 0x40000104
#define HV_X64_MSR_CRASH_CTL 0x40000105

#define HV_CRASH_CTL_CRASH_NOTIFY (1ULL << 63)

/* Define version of the synthetic interrupt controller. */
#define HV_SYNIC_VERSION (1)

#define HV_ANY_VP (0xFFFFFFFF)

/* Define synthetic interrupt controller flag constants. */
#define HV_EVENT_FLAGS_COUNT (256 * 8)
#define HV_EVENT_FLAGS_BYTE_COUNT (256)
#define HV_EVENT_FLAGS_DWORD_COUNT (256 / sizeof(u32))

/* Define invalid partition identifier. */
#define HV_PARTITION_ID_INVALID ((u64)0x0)

/* Define port type. */
enum hv_port_type {
	HVPORT_MSG = 1,
	HVPORT_EVENT = 2,
	HVPORT_MONITOR = 3
};

/* Define port information structure. */
struct hv_port_info {
	enum hv_port_type port_type;
	u32 padding;
	union {
		struct {
			u32 target_sint;
			u32 target_vp;
			u64 rsvdz;
		} message_port_info;
		struct {
			u32 target_sint;
			u32 target_vp;
			u16 base_flag_number;
			u16 flag_count;
			u32 rsvdz;
		} event_port_info;
		struct {
			u64 monitor_address;
			u64 rsvdz;
		} monitor_port_info;
	};
};

struct hv_connection_info {
	enum hv_port_type port_type;
	u32 padding;
	union {
		struct {
			u64 rsvdz;
		} message_connection_info;
		struct {
			u64 rsvdz;
		} event_connection_info;
		struct {
			u64 monitor_address;
		} monitor_connection_info;
	};
};
#define HV_EVENT_FLAGS_LONG_COUNT (256 / sizeof(unsigned long))

/*
 * Timer configuration register.
@@ -146,18 +61,10 @@ union hv_timer_config {
	};
};

/* Define the number of message buffers associated with each port. */
#define HV_PORT_MESSAGE_BUFFER_COUNT (16)

/* Define the synthetic interrupt controller event flags format. */
union hv_synic_event_flags {
	u8 flags8[HV_EVENT_FLAGS_BYTE_COUNT];
	u32 flags32[HV_EVENT_FLAGS_DWORD_COUNT];
};

/* Define the synthetic interrupt flags page layout. */
struct hv_synic_event_flags_page {
	union hv_synic_event_flags sintevent_flags[HV_SYNIC_SINT_COUNT];
	unsigned long flags[HV_EVENT_FLAGS_LONG_COUNT];
};

/* Define SynIC control register. */
@@ -261,6 +168,8 @@ struct hv_monitor_page {
	u8 rsvdz4[1984];
};

#define HV_HYPERCALL_PARAM_ALIGN sizeof(u64)

/* Definition of the hv_post_message hypercall input structure. */
struct hv_input_post_message {
	union hv_connection_id connectionid;
@@ -270,56 +179,6 @@ struct hv_input_post_message {
	u64 payload[HV_MESSAGE_PAYLOAD_QWORD_COUNT];
};

/*
 * Versioning definitions used for guests reporting themselves to the
 * hypervisor, and visa versa.
 */

/* Version info reported by guest OS's */
enum hv_guest_os_vendor {
	HVGUESTOS_VENDOR_MICROSOFT = 0x0001
};

enum hv_guest_os_microsoft_ids {
	HVGUESTOS_MICROSOFT_UNDEFINED = 0x00,
	HVGUESTOS_MICROSOFT_MSDOS = 0x01,
	HVGUESTOS_MICROSOFT_WINDOWS3X = 0x02,
	HVGUESTOS_MICROSOFT_WINDOWS9X = 0x03,
	HVGUESTOS_MICROSOFT_WINDOWSNT = 0x04,
	HVGUESTOS_MICROSOFT_WINDOWSCE = 0x05
};

/*
 * Declare the MSR used to identify the guest OS.
 */
#define HV_X64_MSR_GUEST_OS_ID 0x40000000

union hv_x64_msr_guest_os_id_contents {
	u64 as_uint64;
	struct {
		u64 build_number:16;
		u64 service_version:8; /* Service Pack, etc. */
		u64 minor_version:8;
		u64 major_version:8;
		u64 os_id:8; /* enum hv_guest_os_microsoft_ids (if Vendor=MS) */
		u64 vendor_id:16; /* enum hv_guest_os_vendor */
	};
};

/*
 * Declare the MSR used to setup pages used to communicate with the hypervisor.
 */
#define HV_X64_MSR_HYPERCALL 0x40000001

union hv_x64_msr_hypercall_contents {
	u64 as_uint64;
	struct {
		u64 enable:1;
		u64 reserved:11;
		u64 guest_physical_address:52;
	};
};

enum {
	VMBUS_MESSAGE_CONNECTION_ID = 1,
@@ -331,111 +190,44 @@ enum {
	VMBUS_MESSAGE_SINT = 2,
};

/* #defines */

#define HV_PRESENT_BIT 0x80000000

/*
 * The guest OS needs to register the guest ID with the hypervisor.
 * The guest ID is a 64 bit entity and the structure of this ID is
 * specified in the Hyper-V specification:
 *
 * http://msdn.microsoft.com/en-us/library/windows/hardware/ff542653%28v=vs.85%29.aspx
 *
 * While the current guideline does not specify how Linux guest ID(s)
 * need to be generated, our plan is to publish the guidelines for
 * Linux and other guest operating systems that currently are hosted
 * on Hyper-V. The implementation here conforms to this yet
 * unpublished guidelines.
 *
 *
 * Bit(s)
 * 63 - Indicates if the OS is Open Source or not; 1 is Open Source
 * 62:56 - Os Type; Linux is 0x100
 * 55:48 - Distro specific identification
 * 47:16 - Linux kernel version number
 * 15:0 - Distro specific identification
 *
 *
 * Per cpu state for channel handling
 */
struct hv_per_cpu_context {
	void *synic_message_page;
	void *synic_event_page;
	/*
	 * buffer to post messages to the host.
	 */
	void *post_msg_page;

#define HV_LINUX_VENDOR_ID 0x8100
	/*
	 * Starting with win8, we can take channel interrupts on any CPU;
	 * we will manage the tasklet that handles events messages on a per CPU
	 * basis.
	 */
	struct tasklet_struct msg_dpc;

/*
 * Generate the guest ID based on the guideline described above.
 */

static inline __u64 generate_guest_id(__u8 d_info1, __u32 kernel_version,
				      __u16 d_info2)
{
	__u64 guest_id = 0;

	guest_id = (((__u64)HV_LINUX_VENDOR_ID) << 48);
	guest_id |= (((__u64)(d_info1)) << 48);
	guest_id |= (((__u64)(kernel_version)) << 16);
	guest_id |= ((__u64)(d_info2));

	return guest_id;
}
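/*
 * Worked example for generate_guest_id() (illustrative): with
 * d_info1 = 0, kernel_version = LINUX_VERSION_CODE and d_info2 = 0,
 * the result is (0x8100ULL << 48) | (LINUX_VERSION_CODE << 16), i.e.
 * the Linux vendor ID in the top bits and the kernel version in bits
 * 47:16, matching the bit layout described in the comment above.
 */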

#define HV_CPU_POWER_MANAGEMENT (1 << 0)
#define HV_RECOMMENDATIONS_MAX 4

#define HV_X64_MAX 5
#define HV_CAPS_MAX 8


#define HV_HYPERCALL_PARAM_ALIGN sizeof(u64)


/* Service definitions */

#define HV_SERVICE_PARENT_PORT (0)
#define HV_SERVICE_PARENT_CONNECTION (0)

#define HV_SERVICE_CONNECT_RESPONSE_SUCCESS (0)
#define HV_SERVICE_CONNECT_RESPONSE_INVALID_PARAMETER (1)
#define HV_SERVICE_CONNECT_RESPONSE_UNKNOWN_SERVICE (2)
#define HV_SERVICE_CONNECT_RESPONSE_CONNECTION_REJECTED (3)

#define HV_SERVICE_CONNECT_REQUEST_MESSAGE_ID (1)
#define HV_SERVICE_CONNECT_RESPONSE_MESSAGE_ID (2)
#define HV_SERVICE_DISCONNECT_REQUEST_MESSAGE_ID (3)
#define HV_SERVICE_DISCONNECT_RESPONSE_MESSAGE_ID (4)
#define HV_SERVICE_MAX_MESSAGE_ID (4)

#define HV_SERVICE_PROTOCOL_VERSION (0x0010)
#define HV_CONNECT_PAYLOAD_BYTE_COUNT 64

/* #define VMBUS_REVISION_NUMBER 6 */

/* Our local vmbus's port and connection id. Anything >0 is fine */
/* #define VMBUS_PORT_ID 11 */

/* 628180B8-308D-4c5e-B7DB-1BEB62E62EF4 */
static const uuid_le VMBUS_SERVICE_ID = {
	.b = {
		0xb8, 0x80, 0x81, 0x62, 0x8d, 0x30, 0x5e, 0x4c,
		0xb7, 0xdb, 0x1b, 0xeb, 0x62, 0xe6, 0x2e, 0xf4
	},
	/*
	 * To optimize the mapping of relid to channel, maintain
	 * per-cpu list of the channels based on their CPU affinity.
	 */
	struct list_head chan_list;
	struct clock_event_device *clk_evt;
};
struct hv_context {
	/* We only support running on top of Hyper-V
	 * So at this point this really can only contain the Hyper-V ID
	 */
	u64 guestid;

	void *hypercall_page;
	void *tsc_page;

	bool synic_initialized;

	void *synic_message_page[NR_CPUS];
	void *synic_event_page[NR_CPUS];
	struct hv_per_cpu_context __percpu *cpu_context;

	/*
	 * Hypervisor's notion of virtual processor ID is different from
	 * Linux' notion of CPU ID. This information can only be retrieved
@@ -446,26 +238,7 @@ struct hv_context {
	 * Linux cpuid 'a'.
	 */
	u32 vp_index[NR_CPUS];
	/*
	 * Starting with win8, we can take channel interrupts on any CPU;
	 * we will manage the tasklet that handles events messages on a per CPU
	 * basis.
	 */
	struct tasklet_struct *event_dpc[NR_CPUS];
	struct tasklet_struct *msg_dpc[NR_CPUS];
	/*
	 * To optimize the mapping of relid to channel, maintain
	 * per-cpu list of the channels based on their CPU affinity.
	 */
	struct list_head percpu_list[NR_CPUS];
	/*
	 * buffer to post messages to the host.
	 */
	void *post_msg_page[NR_CPUS];
	/*
	 * Support PV clockevent device.
	 */
	struct clock_event_device *clk_evt[NR_CPUS];

	/*
	 * To manage allocations in a NUMA node.
	 * Array indexed by numa node ID.
@@ -475,14 +248,6 @@ struct hv_context {

extern struct hv_context hv_context;

struct ms_hyperv_tsc_page {
	volatile u32 tsc_sequence;
	u32 reserved1;
	volatile u64 tsc_scale;
	volatile s64 tsc_offset;
	u64 reserved2[509];
};

struct hv_ring_buffer_debug_info {
	u32 current_interrupt_mask;
	u32 current_read_index;
@@ -495,8 +260,6 @@ struct hv_ring_buffer_debug_info {

extern int hv_init(void);

extern void hv_cleanup(bool crash);

extern int hv_post_message(union hv_connection_id connection_id,
			   enum hv_message_type message_type,
			   void *payload, size_t payload_size);
@@ -505,20 +268,12 @@ extern int hv_synic_alloc(void);

extern void hv_synic_free(void);

extern void hv_synic_init(void *irqarg);
extern int hv_synic_init(unsigned int cpu);

extern void hv_synic_cleanup(void *arg);
extern int hv_synic_cleanup(unsigned int cpu);

extern void hv_synic_clockevents_cleanup(void);

/*
 * Host version information.
 */
extern unsigned int host_info_eax;
extern unsigned int host_info_ebx;
extern unsigned int host_info_ecx;
extern unsigned int host_info_edx;

/* Interface */


@@ -528,20 +283,14 @@ int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info);

int hv_ringbuffer_write(struct vmbus_channel *channel,
			struct kvec *kv_list,
			u32 kv_count, bool lock,
			bool kick_q);
			const struct kvec *kv_list, u32 kv_count);

int hv_ringbuffer_read(struct vmbus_channel *channel,
		       void *buffer, u32 buflen, u32 *buffer_actual_len,
		       u64 *requestid, bool raw);

void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
				 struct hv_ring_buffer_debug_info *debug_info);

void hv_begin_read(struct hv_ring_buffer_info *rbi);

u32 hv_end_read(struct hv_ring_buffer_info *rbi);
void hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
				 struct hv_ring_buffer_debug_info *debug_info);

/*
 * Maximum channels is determined by the size of the interrupt page
@@ -608,6 +357,11 @@ struct vmbus_msginfo {

extern struct vmbus_connection vmbus_connection;

static inline void vmbus_send_interrupt(u32 relid)
{
	sync_set_bit(relid, vmbus_connection.send_int_page);
}
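/*
 * Equivalence note: sync_set_bit(relid, send_int_page) computes the
 * containing word and bit position from the bit number by itself, so
 * this helper replaces the open-coded (child_relid >> 5) /
 * (child_relid & 31) arithmetic that vmbus_set_event used to do.
 */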
enum vmbus_message_handler_type {
|
||||
/* The related handler can sleep. */
|
||||
VMHT_BLOCKING = 0,
|
||||
@ -625,41 +379,6 @@ struct vmbus_channel_message_table_entry {
|
||||
extern struct vmbus_channel_message_table_entry
|
||||
channel_message_table[CHANNELMSG_COUNT];
|
||||
|
||||
/* Free the message slot and signal end-of-message if required */
|
||||
static inline void vmbus_signal_eom(struct hv_message *msg, u32 old_msg_type)
|
||||
{
|
||||
/*
|
||||
* On crash we're reading some other CPU's message page and we need
|
||||
* to be careful: this other CPU may already had cleared the header
|
||||
* and the host may already had delivered some other message there.
|
||||
* In case we blindly write msg->header.message_type we're going
|
||||
* to lose it. We can still lose a message of the same type but
|
||||
* we count on the fact that there can only be one
|
||||
* CHANNELMSG_UNLOAD_RESPONSE and we don't care about other messages
|
||||
* on crash.
|
||||
*/
|
||||
if (cmpxchg(&msg->header.message_type, old_msg_type,
|
||||
HVMSG_NONE) != old_msg_type)
|
||||
return;
|
||||
|
||||
/*
|
||||
* Make sure the write to MessageType (ie set to
|
||||
* HVMSG_NONE) happens before we read the
|
||||
* MessagePending and EOMing. Otherwise, the EOMing
|
||||
* will not deliver any more messages since there is
|
||||
* no empty slot
|
||||
*/
|
||||
mb();
|
||||
|
||||
if (msg->header.message_flags.msg_pending) {
|
||||
/*
|
||||
* This will cause message queue rescan to
|
||||
* possibly deliver another msg from the
|
||||
* hypervisor
|
||||
*/
|
||||
wrmsrl(HV_X64_MSR_EOM, 0);
|
||||
}
|
||||
}
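
A note on the cmpxchg() above: only the winner of the compare-exchange may recycle the slot and signal end-of-message, which is what keeps a crashing CPU from clobbering a message the host has just delivered. A minimal userspace C11 sketch of the same claim-then-acknowledge pattern (slot_type, claim_slot and SLOT_EMPTY are illustrative names, not part of the vmbus code):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define SLOT_EMPTY 0u

/* One shared message slot; 'slot_type' doubles as the ownership flag. */
static _Atomic uint32_t slot_type;

/* Returns 1 if we owned the slot and may acknowledge it, 0 if another
 * consumer (or the producer) got there first: only the winner of the
 * compare-exchange writes SLOT_EMPTY and then signals end-of-message. */
static int claim_slot(uint32_t expected_type)
{
        uint32_t expected = expected_type;

        if (!atomic_compare_exchange_strong(&slot_type, &expected, SLOT_EMPTY))
                return 0;       /* raced: slot already recycled */

        /* The seq_cst compare-exchange orders the slot release before
         * any subsequent "end of message" signal, mirroring the mb(). */
        return 1;
}

int main(void)
{
        atomic_store(&slot_type, 7u);
        printf("first claim: %d\n", claim_slot(7u));  /* 1 */
        printf("second claim: %d\n", claim_slot(7u)); /* 0 */
        return 0;
}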

/* General vmbus interface */

@ -670,10 +389,6 @@ struct hv_device *vmbus_device_create(const uuid_le *type,
int vmbus_device_register(struct hv_device *child_device_obj);
void vmbus_device_unregister(struct hv_device *device_obj);

/* static void */
/* VmbusChildDeviceDestroy( */
/* struct hv_device *); */

struct vmbus_channel *relid2channel(u32 relid);

void vmbus_free_channels(void);
@ -683,7 +398,7 @@ void vmbus_free_channels(void);
int vmbus_connect(void);
void vmbus_disconnect(void);

int vmbus_post_msg(void *buffer, size_t buflen);
int vmbus_post_msg(void *buffer, size_t buflen, bool can_sleep);

void vmbus_on_event(unsigned long data);
void vmbus_on_msg_dpc(unsigned long data);
@ -32,26 +32,6 @@

#include "hyperv_vmbus.h"

void hv_begin_read(struct hv_ring_buffer_info *rbi)
{
        rbi->ring_buffer->interrupt_mask = 1;
        virt_mb();
}

u32 hv_end_read(struct hv_ring_buffer_info *rbi)
{

        rbi->ring_buffer->interrupt_mask = 0;
        virt_mb();

        /*
         * Now check to see if the ring buffer is still empty.
         * If it is not, we raced and we need to process new
         * incoming messages.
         */
        return hv_get_bytes_to_read(rbi);
}
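
hv_end_read() unmasks first and only then re-checks the ring: if the host pushed data while the mask was still up, no interrupt was sent, so the consumer itself must notice the leftover bytes. A small C11 sketch of this unmask-then-recheck idiom under simplified assumptions (the ring layout and names are illustrative):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Toy ring state: producer advances write_index, consumer read_index. */
struct ring {
        _Atomic uint32_t interrupt_mask;
        _Atomic uint32_t read_index;
        _Atomic uint32_t write_index;
};

static uint32_t bytes_to_read(struct ring *r)
{
        return atomic_load(&r->write_index) - atomic_load(&r->read_index);
}

static void begin_read(struct ring *r)
{
        atomic_store(&r->interrupt_mask, 1);
        atomic_thread_fence(memory_order_seq_cst); /* mask before reads */
}

/* Unmask, then re-check: data that arrived while the mask was set was
 * never signaled, so the caller must consume whatever is returned. */
static uint32_t end_read(struct ring *r)
{
        atomic_store(&r->interrupt_mask, 0);
        atomic_thread_fence(memory_order_seq_cst);
        return bytes_to_read(r);
}

int main(void)
{
        struct ring r = { 0, 0, 0 };

        begin_read(&r);
        atomic_store(&r.write_index, 16);           /* producer slips data in */
        printf("missed bytes: %u\n", end_read(&r)); /* 16: consume them here */
        return 0;
}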

/*
 * When we write to the ring buffer, check if the host needs to
 * be signaled. Here is the details of this protocol:
@ -77,8 +57,7 @@ u32 hv_end_read(struct hv_ring_buffer_info *rbi)
 * host logic is fixed.
 */

static void hv_signal_on_write(u32 old_write, struct vmbus_channel *channel,
                               bool kick_q)
static void hv_signal_on_write(u32 old_write, struct vmbus_channel *channel)
{
        struct hv_ring_buffer_info *rbi = &channel->outbound;

@ -117,11 +96,9 @@ hv_set_next_write_location(struct hv_ring_buffer_info *ring_info,

/* Get the next read location for the specified ring buffer. */
static inline u32
hv_get_next_read_location(struct hv_ring_buffer_info *ring_info)
hv_get_next_read_location(const struct hv_ring_buffer_info *ring_info)
{
        u32 next = ring_info->ring_buffer->read_index;

        return next;
        return ring_info->ring_buffer->read_index;
}

/*
@ -129,13 +106,14 @@ hv_get_next_read_location(struct hv_ring_buffer_info *ring_info)
 * This allows the caller to skip.
 */
static inline u32
hv_get_next_readlocation_withoffset(struct hv_ring_buffer_info *ring_info,
                                    u32 offset)
hv_get_next_readlocation_withoffset(const struct hv_ring_buffer_info *ring_info,
                                    u32 offset)
{
        u32 next = ring_info->ring_buffer->read_index;

        next += offset;
        next %= ring_info->ring_datasize;
        if (next >= ring_info->ring_datasize)
                next -= ring_info->ring_datasize;

        return next;
}
@ -151,7 +129,7 @@ hv_set_next_read_location(struct hv_ring_buffer_info *ring_info,

/* Get the size of the ring buffer. */
static inline u32
hv_get_ring_buffersize(struct hv_ring_buffer_info *ring_info)
hv_get_ring_buffersize(const struct hv_ring_buffer_info *ring_info)
{
        return ring_info->ring_datasize;
}
@ -168,7 +146,7 @@ hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info)
 * Assume there is enough room. Handles wrap-around in src case only!!
 */
static u32 hv_copyfrom_ringbuffer(
        struct hv_ring_buffer_info *ring_info,
        const struct hv_ring_buffer_info *ring_info,
        void *dest,
        u32 destlen,
        u32 start_read_offset)
@ -179,7 +157,8 @@ static u32 hv_copyfrom_ringbuffer(
        memcpy(dest, ring_buffer + start_read_offset, destlen);

        start_read_offset += destlen;
        start_read_offset %= ring_buffer_size;
        if (start_read_offset >= ring_buffer_size)
                start_read_offset -= ring_buffer_size;

        return start_read_offset;
}
@ -192,7 +171,7 @@ static u32 hv_copyfrom_ringbuffer(
static u32 hv_copyto_ringbuffer(
        struct hv_ring_buffer_info *ring_info,
        u32 start_write_offset,
        void *src,
        const void *src,
        u32 srclen)
{
        void *ring_buffer = hv_get_ring_buffer(ring_info);
@ -201,14 +180,15 @@ static u32 hv_copyto_ringbuffer(
        memcpy(ring_buffer + start_write_offset, src, srclen);

        start_write_offset += srclen;
        start_write_offset %= ring_buffer_size;
        if (start_write_offset >= ring_buffer_size)
                start_write_offset -= ring_buffer_size;

        return start_write_offset;
}
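
The recurring change in the two copy helpers above swaps '%' for a compare-and-subtract: a single copy never exceeds the ring size, so the offset can overshoot by at most one ring length, and one conditional subtraction replaces a division on every copy. A standalone C sketch of that invariant (ring_advance is a made-up name):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Advance an offset inside a ring of 'size' bytes. Because len <= size,
 * offset + len < 2 * size always holds, so at most one subtraction is
 * ever needed to bring the result back into range. */
static uint32_t ring_advance(uint32_t offset, uint32_t len, uint32_t size)
{
        assert(len <= size && offset < size);
        offset += len;
        if (offset >= size)
                offset -= size;
        return offset;
}

int main(void)
{
        printf("%u\n", ring_advance(300, 100, 4096));  /* 400: no wrap */
        printf("%u\n", ring_advance(4000, 200, 4096)); /* 104: wrapped */
        return 0;
}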

/* Get various debug metrics for the specified ring buffer. */
void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info,
                                 struct hv_ring_buffer_debug_info *debug_info)
void hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
                                 struct hv_ring_buffer_debug_info *debug_info)
{
        u32 bytes_avail_towrite;
        u32 bytes_avail_toread;
@ -285,8 +265,7 @@ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)

/* Write to the ring buffer. */
int hv_ringbuffer_write(struct vmbus_channel *channel,
                        struct kvec *kv_list, u32 kv_count, bool lock,
                        bool kick_q)
                        const struct kvec *kv_list, u32 kv_count)
{
        int i = 0;
        u32 bytes_avail_towrite;
@ -298,13 +277,15 @@ int hv_ringbuffer_write(struct vmbus_channel *channel,
        unsigned long flags = 0;
        struct hv_ring_buffer_info *outring_info = &channel->outbound;

        if (channel->rescind)
                return -ENODEV;

        for (i = 0; i < kv_count; i++)
                totalbytes_towrite += kv_list[i].iov_len;

        totalbytes_towrite += sizeof(u64);

        if (lock)
                spin_lock_irqsave(&outring_info->ring_lock, flags);
        spin_lock_irqsave(&outring_info->ring_lock, flags);

        bytes_avail_towrite = hv_get_bytes_to_write(outring_info);

@ -314,8 +295,7 @@ int hv_ringbuffer_write(struct vmbus_channel *channel,
         * is empty since the read index == write index.
         */
        if (bytes_avail_towrite <= totalbytes_towrite) {
                if (lock)
                        spin_unlock_irqrestore(&outring_info->ring_lock, flags);
                spin_unlock_irqrestore(&outring_info->ring_lock, flags);
                return -EAGAIN;
        }

@ -346,10 +326,13 @@ int hv_ringbuffer_write(struct vmbus_channel *channel,
        hv_set_next_write_location(outring_info, next_write_location);


        if (lock)
                spin_unlock_irqrestore(&outring_info->ring_lock, flags);
        spin_unlock_irqrestore(&outring_info->ring_lock, flags);

        hv_signal_on_write(old_write, channel);

        if (channel->rescind)
                return -ENODEV;

        hv_signal_on_write(old_write, channel, kick_q);
        return 0;
}
@ -54,31 +54,7 @@ static struct acpi_device *hv_acpi_dev;

static struct completion probe_event;


static void hyperv_report_panic(struct pt_regs *regs)
{
        static bool panic_reported;

        /*
         * We prefer to report panic on 'die' chain as we have proper
         * registers to report, but if we miss it (e.g. on BUG()) we need
         * to report it on 'panic'.
         */
        if (panic_reported)
                return;
        panic_reported = true;

        wrmsrl(HV_X64_MSR_CRASH_P0, regs->ip);
        wrmsrl(HV_X64_MSR_CRASH_P1, regs->ax);
        wrmsrl(HV_X64_MSR_CRASH_P2, regs->bx);
        wrmsrl(HV_X64_MSR_CRASH_P3, regs->cx);
        wrmsrl(HV_X64_MSR_CRASH_P4, regs->dx);

        /*
         * Let Hyper-V know there is crash data available
         */
        wrmsrl(HV_X64_MSR_CRASH_CTL, HV_CRASH_CTL_CRASH_NOTIFY);
}
static int hyperv_cpuhp_online;

static int hyperv_panic_event(struct notifier_block *nb, unsigned long val,
                              void *args)
@ -859,9 +835,10 @@ static void vmbus_onmessage_work(struct work_struct *work)
        kfree(ctx);
}

static void hv_process_timer_expiration(struct hv_message *msg, int cpu)
static void hv_process_timer_expiration(struct hv_message *msg,
                                        struct hv_per_cpu_context *hv_cpu)
{
        struct clock_event_device *dev = hv_context.clk_evt[cpu];
        struct clock_event_device *dev = hv_cpu->clk_evt;

        if (dev->event_handler)
                dev->event_handler(dev);
@ -871,8 +848,8 @@ static void hv_process_timer_expiration(struct hv_message *msg, int cpu)

void vmbus_on_msg_dpc(unsigned long data)
{
        int cpu = smp_processor_id();
        void *page_addr = hv_context.synic_message_page[cpu];
        struct hv_per_cpu_context *hv_cpu = (void *)data;
        void *page_addr = hv_cpu->synic_message_page;
        struct hv_message *msg = (struct hv_message *)page_addr +
                                  VMBUS_MESSAGE_SINT;
        struct vmbus_channel_message_header *hdr;
@ -908,16 +885,88 @@ msg_handled:
        vmbus_signal_eom(msg, message_type);
}


/*
 * Direct callback for channels using other deferred processing
 */
static void vmbus_channel_isr(struct vmbus_channel *channel)
{
        void (*callback_fn)(void *);

        callback_fn = READ_ONCE(channel->onchannel_callback);
        if (likely(callback_fn != NULL))
                (*callback_fn)(channel->channel_callback_context);
}

/*
 * Schedule all channels with events pending
 */
static void vmbus_chan_sched(struct hv_per_cpu_context *hv_cpu)
{
        unsigned long *recv_int_page;
        u32 maxbits, relid;

        if (vmbus_proto_version < VERSION_WIN8) {
                maxbits = MAX_NUM_CHANNELS_SUPPORTED;
                recv_int_page = vmbus_connection.recv_int_page;
        } else {
                /*
                 * When the host is win8 and beyond, the event page
                 * can be directly checked to get the id of the channel
                 * that has the interrupt pending.
                 */
                void *page_addr = hv_cpu->synic_event_page;
                union hv_synic_event_flags *event
                        = (union hv_synic_event_flags *)page_addr +
                                                 VMBUS_MESSAGE_SINT;

                maxbits = HV_EVENT_FLAGS_COUNT;
                recv_int_page = event->flags;
        }

        if (unlikely(!recv_int_page))
                return;

        for_each_set_bit(relid, recv_int_page, maxbits) {
                struct vmbus_channel *channel;

                if (!sync_test_and_clear_bit(relid, recv_int_page))
                        continue;

                /* Special case - vmbus channel protocol msg */
                if (relid == 0)
                        continue;

                /* Find channel based on relid */
                list_for_each_entry(channel, &hv_cpu->chan_list, percpu_list) {
                        if (channel->offermsg.child_relid != relid)
                                continue;

                        switch (channel->callback_mode) {
                        case HV_CALL_ISR:
                                vmbus_channel_isr(channel);
                                break;

                        case HV_CALL_BATCHED:
                                hv_begin_read(&channel->inbound);
                                /* fallthrough */
                        case HV_CALL_DIRECT:
                                tasklet_schedule(&channel->callback_event);
                        }
                }
        }
}
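
vmbus_chan_sched() walks the event page as a bitmask and claims each pending bit atomically before touching the channel. A userspace C11 sketch of the scan-with-test-and-clear idiom it relies on (pending, test_and_clear and the 64-bit width are illustrative; the kernel uses sync_test_and_clear_bit over a full event page):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define MAXBITS 64

static _Atomic uint64_t pending;

/* Atomically clear bit 'b'; returns nonzero if it was set, so exactly
 * one scanner "wins" each pending event. */
static int test_and_clear(int b)
{
        uint64_t mask = UINT64_C(1) << b;
        return (atomic_fetch_and(&pending, ~mask) & mask) != 0;
}

int main(void)
{
        atomic_store(&pending, (UINT64_C(1) << 3) | (UINT64_C(1) << 17));

        /* Walk every set bit; a scanner that loses the race simply
         * skips the channel, as the loop above does. */
        for (int relid = 0; relid < MAXBITS; relid++) {
                if (!test_and_clear(relid))
                        continue;
                if (relid == 0)
                        continue; /* protocol messages, not a channel */
                printf("schedule channel %d\n", relid);
        }
        return 0;
}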

static void vmbus_isr(void)
{
        int cpu = smp_processor_id();
        void *page_addr;
        struct hv_per_cpu_context *hv_cpu
                = this_cpu_ptr(hv_context.cpu_context);
        void *page_addr = hv_cpu->synic_event_page;
        struct hv_message *msg;
        union hv_synic_event_flags *event;
        bool handled = false;

        page_addr = hv_context.synic_event_page[cpu];
        if (page_addr == NULL)
        if (unlikely(page_addr == NULL))
                return;

        event = (union hv_synic_event_flags *)page_addr +
@ -932,10 +981,8 @@ static void vmbus_isr(void)
            (vmbus_proto_version == VERSION_WIN7)) {

                /* Since we are a child, we only need to check bit 0 */
                if (sync_test_and_clear_bit(0,
                        (unsigned long *) &event->flags32[0])) {
                if (sync_test_and_clear_bit(0, event->flags))
                        handled = true;
                }
        } else {
                /*
                 * Our host is win8 or above. The signaling mechanism
@ -947,18 +994,17 @@ static void vmbus_isr(void)
        }

        if (handled)
                tasklet_schedule(hv_context.event_dpc[cpu]);
                vmbus_chan_sched(hv_cpu);


        page_addr = hv_context.synic_message_page[cpu];
        page_addr = hv_cpu->synic_message_page;
        msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;

        /* Check if there are actual msgs to be processed */
        if (msg->header.message_type != HVMSG_NONE) {
                if (msg->header.message_type == HVMSG_TIMER_EXPIRED)
                        hv_process_timer_expiration(msg, cpu);
                        hv_process_timer_expiration(msg, hv_cpu);
                else
                        tasklet_schedule(hv_context.msg_dpc[cpu]);
                        tasklet_schedule(&hv_cpu->msg_dpc);
        }

        add_interrupt_randomness(HYPERVISOR_CALLBACK_VECTOR, 0);
@ -986,7 +1032,7 @@ static int vmbus_bus_init(void)

        ret = bus_register(&hv_bus);
        if (ret)
                goto err_cleanup;
                return ret;

        hv_setup_vmbus_irq(vmbus_isr);

@ -997,14 +1043,16 @@ static int vmbus_bus_init(void)
         * Initialize the per-cpu interrupt state and
         * connect to the host.
         */
        on_each_cpu(hv_synic_init, NULL, 1);
        ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/hyperv:online",
                                hv_synic_init, hv_synic_cleanup);
        if (ret < 0)
                goto err_alloc;
        hyperv_cpuhp_online = ret;

        ret = vmbus_connect();
        if (ret)
                goto err_connect;

        if (vmbus_proto_version > VERSION_WIN7)
                cpu_hotplug_disable();

        /*
         * Only register if the crash MSRs are available
         */
@ -1019,16 +1067,13 @@ static int vmbus_bus_init(void)
        return 0;

err_connect:
        on_each_cpu(hv_synic_cleanup, NULL, 1);
        cpuhp_remove_state(hyperv_cpuhp_online);
err_alloc:
        hv_synic_free();
        hv_remove_vmbus_irq();

        bus_unregister(&hv_bus);

err_cleanup:
        hv_cleanup(false);

        return ret;
}

@ -1478,13 +1523,13 @@ static struct acpi_driver vmbus_acpi_driver = {

static void hv_kexec_handler(void)
{
        int cpu;

        hv_synic_clockevents_cleanup();
        vmbus_initiate_unload(false);
        for_each_online_cpu(cpu)
                smp_call_function_single(cpu, hv_synic_cleanup, NULL, 1);
        hv_cleanup(false);
        vmbus_connection.conn_state = DISCONNECTED;
        /* Make sure conn_state is set as hv_synic_cleanup checks for it */
        mb();
        cpuhp_remove_state(hyperv_cpuhp_online);
        hyperv_cleanup();
};

static void hv_crash_handler(struct pt_regs *regs)
@ -1495,8 +1540,9 @@ static void hv_crash_handler(struct pt_regs *regs)
         * doing the cleanup for current CPU only. This should be sufficient
         * for kdump.
         */
        hv_synic_cleanup(NULL);
        hv_cleanup(true);
        vmbus_connection.conn_state = DISCONNECTED;
        hv_synic_cleanup(smp_processor_id());
        hyperv_cleanup();
};

static int __init hv_acpi_init(void)
@ -1547,24 +1593,24 @@ static void __exit vmbus_exit(void)
        hv_synic_clockevents_cleanup();
        vmbus_disconnect();
        hv_remove_vmbus_irq();
        for_each_online_cpu(cpu)
                tasklet_kill(hv_context.msg_dpc[cpu]);
        for_each_online_cpu(cpu) {
                struct hv_per_cpu_context *hv_cpu
                        = per_cpu_ptr(hv_context.cpu_context, cpu);

                tasklet_kill(&hv_cpu->msg_dpc);
        }
        vmbus_free_channels();

        if (ms_hyperv.misc_features & HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE) {
                unregister_die_notifier(&hyperv_die_block);
                atomic_notifier_chain_unregister(&panic_notifier_list,
                                                 &hyperv_panic_block);
        }
        bus_unregister(&hv_bus);
        hv_cleanup(false);
        for_each_online_cpu(cpu) {
                tasklet_kill(hv_context.event_dpc[cpu]);
                smp_call_function_single(cpu, hv_synic_cleanup, NULL, 1);
        }

        cpuhp_remove_state(hyperv_cpuhp_online);
        hv_synic_free();
        acpi_bus_unregister_driver(&vmbus_acpi_driver);
        if (vmbus_proto_version > VERSION_WIN7)
                cpu_hotplug_enable();
}
@ -242,6 +242,7 @@ static void *etm_setup_aux(int event_cpu, void **pages,
        if (!sink_ops(sink)->alloc_buffer)
                goto err;

        cpu = cpumask_first(mask);
        /* Get the AUX specific data from the sink buffer */
        event_data->snk_config =
                        sink_ops(sink)->alloc_buffer(sink, cpu, pages,

@ -216,10 +216,14 @@ static int etm4_parse_event_config(struct etmv4_drvdata *drvdata,
                goto out;

        /* Go from generic option to ETMv4 specifics */
        if (attr->config & BIT(ETM_OPT_CYCACC))
                config->cfg |= ETMv4_MODE_CYCACC;
        if (attr->config & BIT(ETM_OPT_CYCACC)) {
                config->cfg |= BIT(4);
                /* TRM: Must program this for cycacc to work */
                config->ccctlr = ETM_CYC_THRESHOLD_DEFAULT;
        }
        if (attr->config & BIT(ETM_OPT_TS))
                config->cfg |= ETMv4_MODE_TIMESTAMP;
                /* bit[11], Global timestamp tracing bit */
                config->cfg |= BIT(11);

out:
        return ret;

@ -146,6 +146,7 @@
#define ETM_ARCH_V4 0x40
#define ETMv4_SYNC_MASK 0x1F
#define ETM_CYC_THRESHOLD_MASK 0xFFF
#define ETM_CYC_THRESHOLD_DEFAULT 0x100
#define ETMv4_EVENT_MASK 0xFF
#define ETM_CNTR_MAX_VAL 0xFFFF
#define ETM_TRACEID_MASK 0x3f

@ -356,7 +356,7 @@ static void stm_generic_unlink(struct stm_data *stm_data,
        if (!drvdata || !drvdata->csdev)
                return;

        stm_disable(drvdata->csdev, NULL);
        coresight_disable(drvdata->csdev);
}

static phys_addr_t
@ -20,6 +20,7 @@
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/platform_data/ti-aemif.h>

#define TA_SHIFT 2
#define RHOLD_SHIFT 4
@ -335,6 +336,8 @@ static int aemif_probe(struct platform_device *pdev)
        struct device_node *np = dev->of_node;
        struct device_node *child_np;
        struct aemif_device *aemif;
        struct aemif_platform_data *pdata;
        struct of_dev_auxdata *dev_lookup;

        if (np == NULL)
                return 0;
@ -343,6 +346,9 @@ static int aemif_probe(struct platform_device *pdev)
        if (!aemif)
                return -ENOMEM;

        pdata = dev_get_platdata(&pdev->dev);
        dev_lookup = pdata ? pdata->dev_lookup : NULL;

        platform_set_drvdata(pdev, aemif);

        aemif->clk = devm_clk_get(dev, NULL);
@ -390,7 +396,7 @@ static int aemif_probe(struct platform_device *pdev)
         * parameters are set.
         */
        for_each_available_child_of_node(np, child_np) {
                ret = of_platform_populate(child_np, NULL, NULL, dev);
                ret = of_platform_populate(child_np, NULL, dev_lookup, dev);
                if (ret < 0)
                        goto error;
        }
@ -474,11 +474,15 @@ config SRAM
        bool "Generic on-chip SRAM driver"
        depends on HAS_IOMEM
        select GENERIC_ALLOCATOR
        select SRAM_EXEC if ARM
        help
          This driver allows you to declare a memory region to be managed by
          the genalloc API. It is supposed to be used for small on-chip SRAM
          areas found on many SoCs.

config SRAM_EXEC
        bool

config VEXPRESS_SYSCFG
        bool "Versatile Express System Configuration driver"
        depends on VEXPRESS_CONFIG
@ -487,6 +491,7 @@ config VEXPRESS_SYSCFG
          ARM Ltd. Versatile Express uses specialised platform configuration
          bus. System Configuration interface is one of the possible means
          of generating transactions on this bus.

config PANEL
        tristate "Parallel port LCD/Keypad Panel support"
        depends on PARPORT
@ -494,14 +499,14 @@ config PANEL
          Say Y here if you have an HD44780 or KS-0074 LCD connected to your
          parallel port. This driver also features 4 and 6-key keypads. The LCD
          is accessible through the /dev/lcd char device (10, 156), and the
          keypad through /dev/keypad (10, 185). Both require misc device to be
          enabled. This code can either be compiled as a module, or linked into
          the kernel and started at boot. If you don't understand what all this
          is about, say N.
          keypad through /dev/keypad (10, 185). This code can either be
          compiled as a module, or linked into the kernel and started at boot.
          If you don't understand what all this is about, say N.

if PANEL

config PANEL_PARPORT
        int "Default parallel port number (0=LPT1)"
        depends on PANEL
        range 0 255
        default "0"
        ---help---
@ -513,7 +518,6 @@ config PANEL_PARPORT

config PANEL_PROFILE
        int "Default panel profile (0-5, 0=custom)"
        depends on PANEL
        range 0 5
        default "5"
        ---help---
@ -534,7 +538,7 @@ config PANEL_PROFILE
          for experts.

config PANEL_KEYPAD
        depends on PANEL && PANEL_PROFILE="0"
        depends on PANEL_PROFILE="0"
        int "Keypad type (0=none, 1=old 6 keys, 2=new 6 keys, 3=Nexcom 4 keys)"
        range 0 3
        default 0
@ -551,7 +555,7 @@ config PANEL_KEYPAD
          supports simultaneous keys pressed when the keypad supports them.

config PANEL_LCD
        depends on PANEL && PANEL_PROFILE="0"
        depends on PANEL_PROFILE="0"
        int "LCD type (0=none, 1=custom, 2=old //, 3=ks0074, 4=hantronix, 5=Nexcom)"
        range 0 5
        default 0
@ -574,7 +578,7 @@ config PANEL_LCD
          that those values changed from the 2.4 driver for better consistency.

config PANEL_LCD_HEIGHT
        depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1"
        depends on PANEL_PROFILE="0" && PANEL_LCD="1"
        int "Number of lines on the LCD (1-2)"
        range 1 2
        default 2
@ -583,7 +587,7 @@ config PANEL_LCD_HEIGHT
          It can either be 1 or 2.

config PANEL_LCD_WIDTH
        depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1"
        depends on PANEL_PROFILE="0" && PANEL_LCD="1"
        int "Number of characters per line on the LCD (1-40)"
        range 1 40
        default 40
@ -592,7 +596,7 @@ config PANEL_LCD_WIDTH
          Common values are 16,20,24,40.

config PANEL_LCD_BWIDTH
        depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1"
        depends on PANEL_PROFILE="0" && PANEL_LCD="1"
        int "Internal LCD line width (1-40, 40 by default)"
        range 1 40
        default 40
@ -608,7 +612,7 @@ config PANEL_LCD_BWIDTH
          If you don't know, put '40' here.

config PANEL_LCD_HWIDTH
        depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1"
        depends on PANEL_PROFILE="0" && PANEL_LCD="1"
        int "Hardware LCD line width (1-64, 64 by default)"
        range 1 64
        default 64
@ -622,7 +626,7 @@ config PANEL_LCD_HWIDTH
          64 here for a 2x40.

config PANEL_LCD_CHARSET
        depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1"
        depends on PANEL_PROFILE="0" && PANEL_LCD="1"
        int "LCD character set (0=normal, 1=KS0074)"
        range 0 1
        default 0
@ -638,7 +642,7 @@ config PANEL_LCD_CHARSET
          If you don't know, use the normal one (0).

config PANEL_LCD_PROTO
        depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1"
        depends on PANEL_PROFILE="0" && PANEL_LCD="1"
        int "LCD communication mode (0=parallel 8 bits, 1=serial)"
        range 0 1
        default 0
@ -651,7 +655,7 @@ config PANEL_LCD_PROTO
          parallel LCD, and 1 for a serial LCD.

config PANEL_LCD_PIN_E
        depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO="0"
        depends on PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO="0"
        int "Parallel port pin number & polarity connected to the LCD E signal (-17...17) "
        range -17 17
        default 14
@ -666,7 +670,7 @@ config PANEL_LCD_PIN_E
          Default for the 'E' pin in custom profile is '14' (AUTOFEED).

config PANEL_LCD_PIN_RS
        depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO="0"
        depends on PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO="0"
        int "Parallel port pin number & polarity connected to the LCD RS signal (-17...17) "
        range -17 17
        default 17
@ -681,7 +685,7 @@ config PANEL_LCD_PIN_RS
          Default for the 'RS' pin in custom profile is '17' (SELECT IN).

config PANEL_LCD_PIN_RW
        depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO="0"
        depends on PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO="0"
        int "Parallel port pin number & polarity connected to the LCD RW signal (-17...17) "
        range -17 17
        default 16
@ -696,7 +700,7 @@ config PANEL_LCD_PIN_RW
          Default for the 'RW' pin in custom profile is '16' (INIT).

config PANEL_LCD_PIN_SCL
        depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO!="0"
        depends on PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO!="0"
        int "Parallel port pin number & polarity connected to the LCD SCL signal (-17...17) "
        range -17 17
        default 1
@ -711,7 +715,7 @@ config PANEL_LCD_PIN_SCL
          Default for the 'SCL' pin in custom profile is '1' (STROBE).

config PANEL_LCD_PIN_SDA
        depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO!="0"
        depends on PANEL_PROFILE="0" && PANEL_LCD="1" && PANEL_LCD_PROTO!="0"
        int "Parallel port pin number & polarity connected to the LCD SDA signal (-17...17) "
        range -17 17
        default 2
@ -726,7 +730,7 @@ config PANEL_LCD_PIN_SDA
          Default for the 'SDA' pin in custom profile is '2' (D0).

config PANEL_LCD_PIN_BL
        depends on PANEL && PANEL_PROFILE="0" && PANEL_LCD="1"
        depends on PANEL_PROFILE="0" && PANEL_LCD="1"
        int "Parallel port pin number & polarity connected to the LCD backlight signal (-17...17) "
        range -17 17
        default 0
@ -741,7 +745,6 @@ config PANEL_LCD_PIN_BL
          Default for the 'BL' pin in custom profile is '0' (uncontrolled).

config PANEL_CHANGE_MESSAGE
        depends on PANEL
        bool "Change LCD initialization message ?"
        default "n"
        ---help---
@ -754,7 +757,7 @@ config PANEL_CHANGE_MESSAGE
          say 'N' and keep the default message with the version.

config PANEL_BOOT_MESSAGE
        depends on PANEL && PANEL_CHANGE_MESSAGE="y"
        depends on PANEL_CHANGE_MESSAGE="y"
        string "New initialization message"
        default ""
        ---help---
@ -766,6 +769,8 @@ config PANEL_BOOT_MESSAGE
          An empty message will only clear the display at driver init time. Any other
          printf()-formatted message is valid with newline and escape codes.

endif # PANEL

source "drivers/misc/c2port/Kconfig"
source "drivers/misc/eeprom/Kconfig"
source "drivers/misc/cb710/Kconfig"
@ -47,6 +47,7 @@ obj-$(CONFIG_INTEL_MEI) += mei/
obj-$(CONFIG_VMWARE_VMCI) += vmw_vmci/
obj-$(CONFIG_LATTICE_ECP3_CONFIG) += lattice-ecp3-config.o
obj-$(CONFIG_SRAM) += sram.o
obj-$(CONFIG_SRAM_EXEC) += sram-exec.o
obj-y += mic/
obj-$(CONFIG_GENWQE) += genwqe/
obj-$(CONFIG_ECHO) += echo/

@ -100,4 +100,14 @@ config EEPROM_DIGSY_MTC_CFG

          If unsure, say N.

config EEPROM_IDT_89HPESX
        tristate "IDT 89HPESx PCIe-swtiches EEPROM / CSR support"
        depends on I2C && SYSFS
        help
          Enable this driver to get read/write access to EEPROM / CSRs
          over IDT PCIe-swtich i2c-slave interface.

          This driver can also be built as a module. If so, the module
          will be called idt_89hpesx.

endmenu

@ -5,3 +5,4 @@ obj-$(CONFIG_EEPROM_MAX6875) += max6875.o
obj-$(CONFIG_EEPROM_93CX6) += eeprom_93cx6.o
obj-$(CONFIG_EEPROM_93XX46) += eeprom_93xx46.o
obj-$(CONFIG_EEPROM_DIGSY_MTC_CFG) += digsy_mtc_eeprom.o
obj-$(CONFIG_EEPROM_IDT_89HPESX) += idt_89hpesx.o
1581
drivers/misc/eeprom/idt_89hpesx.c
Normal file
File diff suppressed because it is too large
Load Diff
@ -1336,7 +1336,6 @@ static int genwqe_sriov_configure(struct pci_dev *dev, int numvfs)
static struct pci_error_handlers genwqe_err_handler = {
        .error_detected = genwqe_err_error_detected,
        .mmio_enabled = genwqe_err_result_none,
        .link_reset = genwqe_err_result_none,
        .slot_reset = genwqe_err_slot_reset,
        .resume = genwqe_err_resume,
};
@ -81,12 +81,17 @@ void lkdtm_OVERFLOW(void)
        (void) recursive_loop(recur_count);
}

static noinline void __lkdtm_CORRUPT_STACK(void *stack)
{
        memset(stack, 'a', 64);
}

noinline void lkdtm_CORRUPT_STACK(void)
{
        /* Use default char array length that triggers stack protection. */
        char data[8];
        __lkdtm_CORRUPT_STACK(&data);

        memset((void *)data, 'a', 64);
        pr_info("Corrupted stack with '%16s'...\n", data);
}


@ -539,7 +539,9 @@ static void __exit lkdtm_module_exit(void)
        /* Handle test-specific clean-up. */
        lkdtm_usercopy_exit();

        unregister_jprobe(lkdtm_jprobe);
        if (lkdtm_jprobe != NULL)
                unregister_jprobe(lkdtm_jprobe);

        pr_info("Crash point unregistered\n");
}
@ -132,8 +132,7 @@ int mei_amthif_run_next_cmd(struct mei_device *dev)

        dev_dbg(dev->dev, "complete amthif cmd_list cb.\n");

        cb = list_first_entry_or_null(&dev->amthif_cmd_list.list,
                                      typeof(*cb), list);
        cb = list_first_entry_or_null(&dev->amthif_cmd_list, typeof(*cb), list);
        if (!cb) {
                dev->iamthif_state = MEI_IAMTHIF_IDLE;
                cl->fp = NULL;
@ -167,7 +166,7 @@ int mei_amthif_write(struct mei_cl *cl, struct mei_cl_cb *cb)

        struct mei_device *dev = cl->dev;

        list_add_tail(&cb->list, &dev->amthif_cmd_list.list);
        list_add_tail(&cb->list, &dev->amthif_cmd_list);

        /*
         * The previous request is still in processing, queue this one.
@ -211,7 +210,7 @@ unsigned int mei_amthif_poll(struct file *file, poll_table *wait)
 * Return: 0, OK; otherwise, error.
 */
int mei_amthif_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
                         struct mei_cl_cb *cmpl_list)
                         struct list_head *cmpl_list)
{
        int ret;

@ -237,7 +236,7 @@ int mei_amthif_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
 */
int mei_amthif_irq_read_msg(struct mei_cl *cl,
                            struct mei_msg_hdr *mei_hdr,
                            struct mei_cl_cb *cmpl_list)
                            struct list_head *cmpl_list)
{
        struct mei_device *dev;
        int ret;
@ -311,51 +310,31 @@ void mei_amthif_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
        }
}

/**
 * mei_clear_list - removes all callbacks associated with file
 * from mei_cb_list
 *
 * @file: file structure
 * @mei_cb_list: callbacks list
 *
 * mei_clear_list is called to clear resources associated with file
 * when application calls close function or Ctrl-C was pressed
 */
static void mei_clear_list(const struct file *file,
                           struct list_head *mei_cb_list)
{
        struct mei_cl_cb *cb, *next;

        list_for_each_entry_safe(cb, next, mei_cb_list, list)
                if (file == cb->fp)
                        mei_io_cb_free(cb);
}

/**
 * mei_amthif_release - the release function
 *
 * @dev: device structure
 * @file: pointer to file structure
 * @fp: pointer to file structure
 *
 * Return: 0 on success, <0 on error
 */
int mei_amthif_release(struct mei_device *dev, struct file *file)
int mei_amthif_release(struct mei_device *dev, struct file *fp)
{
        struct mei_cl *cl = file->private_data;
        struct mei_cl *cl = fp->private_data;

        if (dev->iamthif_open_count > 0)
                dev->iamthif_open_count--;

        if (cl->fp == file && dev->iamthif_state != MEI_IAMTHIF_IDLE) {
        if (cl->fp == fp && dev->iamthif_state != MEI_IAMTHIF_IDLE) {

                dev_dbg(dev->dev, "amthif canceled iamthif state %d\n",
                        dev->iamthif_state);
                        dev->iamthif_state);
                dev->iamthif_canceled = true;
        }

        mei_clear_list(file, &dev->amthif_cmd_list.list);
        mei_clear_list(file, &cl->rd_completed);
        mei_clear_list(file, &dev->ctrl_rd_list.list);
        /* Don't clean ctrl_rd_list here, the reads has to be completed */
        mei_io_list_free_fp(&dev->amthif_cmd_list, fp);
        mei_io_list_free_fp(&cl->rd_completed, fp);

        return 0;
}
@ -498,6 +498,25 @@ out:
}
EXPORT_SYMBOL_GPL(mei_cldev_enable);

/**
 * mei_cldev_unregister_callbacks - internal wrapper for unregistering
 * callbacks.
 *
 * @cldev: client device
 */
static void mei_cldev_unregister_callbacks(struct mei_cl_device *cldev)
{
        if (cldev->rx_cb) {
                cancel_work_sync(&cldev->rx_work);
                cldev->rx_cb = NULL;
        }

        if (cldev->notif_cb) {
                cancel_work_sync(&cldev->notif_work);
                cldev->notif_cb = NULL;
        }
}

/**
 * mei_cldev_disable - disable me client device
 * disconnect form the me client
@ -519,6 +538,8 @@ int mei_cldev_disable(struct mei_cl_device *cldev)

        bus = cldev->bus;

        mei_cldev_unregister_callbacks(cldev);

        mutex_lock(&bus->device_lock);

        if (!mei_cl_is_connected(cl)) {
@ -541,6 +562,37 @@ out:
}
EXPORT_SYMBOL_GPL(mei_cldev_disable);

/**
 * mei_cl_bus_module_get - acquire module of the underlying
 * hw module.
 *
 * @cl: host client
 *
 * Return: true on success; false if the module was removed.
 */
bool mei_cl_bus_module_get(struct mei_cl *cl)
{
        struct mei_cl_device *cldev = cl->cldev;

        if (!cldev)
                return true;

        return try_module_get(cldev->bus->dev->driver->owner);
}

/**
 * mei_cl_bus_module_put - release the underlying hw module.
 *
 * @cl: host client
 */
void mei_cl_bus_module_put(struct mei_cl *cl)
{
        struct mei_cl_device *cldev = cl->cldev;

        if (cldev)
                module_put(cldev->bus->dev->driver->owner);
}
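
mei_cl_bus_module_get()/put() lean on try_module_get() semantics: a reference may only be taken while the count is still nonzero, so a teardown that has dropped the last reference can never be resurrected. A toy C11 analogue of that try-get rule (ref_tryget/ref_put are invented names; the real module refcount is considerably more involved):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static _Atomic long refs = 1;

/* Succeeds only while the count is nonzero: once the last reference is
 * gone, no new one can be taken. */
static bool ref_tryget(void)
{
        long old = atomic_load(&refs);

        while (old != 0) {
                /* on failure, 'old' is reloaded and the zero check reruns */
                if (atomic_compare_exchange_weak(&refs, &old, old + 1))
                        return true;
        }
        return false;
}

static void ref_put(void)
{
        atomic_fetch_sub(&refs, 1);
}

int main(void)
{
        printf("%d\n", ref_tryget()); /* 1: still live */
        ref_put();
        ref_put();                    /* drop the initial reference */
        printf("%d\n", ref_tryget()); /* 0: gone for good */
        return 0;
}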

/**
 * mei_cl_device_find - find matching entry in the driver id table
 *
@ -665,19 +717,12 @@ static int mei_cl_device_remove(struct device *dev)
        if (!cldev || !dev->driver)
                return 0;

        if (cldev->rx_cb) {
                cancel_work_sync(&cldev->rx_work);
                cldev->rx_cb = NULL;
        }
        if (cldev->notif_cb) {
                cancel_work_sync(&cldev->notif_work);
                cldev->notif_cb = NULL;
        }

        cldrv = to_mei_cl_driver(dev->driver);
        if (cldrv->remove)
                ret = cldrv->remove(cldev);

        mei_cldev_unregister_callbacks(cldev);

        module_put(THIS_MODULE);
        dev->driver = NULL;
        return ret;
@ -377,19 +377,19 @@ static struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl,
}

/**
 * __mei_io_list_flush - removes and frees cbs belonging to cl.
 * __mei_io_list_flush_cl - removes and frees cbs belonging to cl.
 *
 * @list: an instance of our list structure
 * @head: an instance of our list structure
 * @cl: host client, can be NULL for flushing the whole list
 * @free: whether to free the cbs
 */
static void __mei_io_list_flush(struct mei_cl_cb *list,
                                struct mei_cl *cl, bool free)
static void __mei_io_list_flush_cl(struct list_head *head,
                                   const struct mei_cl *cl, bool free)
{
        struct mei_cl_cb *cb, *next;

        /* enable removing everything if no cl is specified */
        list_for_each_entry_safe(cb, next, &list->list, list) {
        list_for_each_entry_safe(cb, next, head, list) {
                if (!cl || mei_cl_cmp_id(cl, cb->cl)) {
                        list_del_init(&cb->list);
                        if (free)
@ -399,25 +399,42 @@ static void __mei_io_list_flush(struct mei_cl_cb *list,
}

/**
 * mei_io_list_flush - removes list entry belonging to cl.
 * mei_io_list_flush_cl - removes list entry belonging to cl.
 *
 * @list: An instance of our list structure
 * @head: An instance of our list structure
 * @cl: host client
 */
void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl)
static inline void mei_io_list_flush_cl(struct list_head *head,
                                        const struct mei_cl *cl)
{
        __mei_io_list_flush(list, cl, false);
        __mei_io_list_flush_cl(head, cl, false);
}

/**
 * mei_io_list_free - removes cb belonging to cl and free them
 * mei_io_list_free_cl - removes cb belonging to cl and free them
 *
 * @list: An instance of our list structure
 * @head: An instance of our list structure
 * @cl: host client
 */
static inline void mei_io_list_free(struct mei_cl_cb *list, struct mei_cl *cl)
static inline void mei_io_list_free_cl(struct list_head *head,
                                       const struct mei_cl *cl)
{
        __mei_io_list_flush(list, cl, true);
        __mei_io_list_flush_cl(head, cl, true);
}

/**
 * mei_io_list_free_fp - free cb from a list that matches file pointer
 *
 * @head: io list
 * @fp: file pointer (matching cb file object), may be NULL
 */
void mei_io_list_free_fp(struct list_head *head, const struct file *fp)
{
        struct mei_cl_cb *cb, *next;

        list_for_each_entry_safe(cb, next, head, list)
                if (!fp || fp == cb->fp)
                        mei_io_cb_free(cb);
}
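
mei_io_list_free_fp() shows the list_for_each_entry_safe() idiom this series standardizes on: the successor is fetched before the current node may be freed, and a NULL file pointer matches every entry. A self-contained C sketch of the same walk on a toy singly linked list (struct cb and list_free_fp are illustrative names):

#include <stdio.h>
#include <stdlib.h>

/* Minimal callback list keyed by an owner pointer. */
struct cb {
        const void *fp;
        struct cb *next;
};

static void list_free_fp(struct cb **head, const void *fp)
{
        struct cb **link = head;
        struct cb *cur, *next;

        for (cur = *head; cur; cur = next) {
                next = cur->next;       /* saved before a possible free */
                if (!fp || fp == cur->fp) {
                        *link = next;
                        free(cur);
                } else {
                        link = &cur->next;
                }
        }
}

int main(void)
{
        const char a = 'a', b = 'b';
        const void *owners[] = { &a, &b, &a };
        struct cb *head = NULL, *n;

        for (int i = 2; i >= 0; i--) {
                n = malloc(sizeof(*n));
                n->fp = owners[i];
                n->next = head;
                head = n;
        }
        list_free_fp(&head, &a);        /* drops the two 'a' callbacks */
        for (n = head; n; n = n->next)
                printf("left: %c\n", *(const char *)n->fp); /* 'b' */
        list_free_fp(&head, NULL);      /* NULL matches everything */
        return 0;
}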

/**
@ -479,7 +496,7 @@ struct mei_cl_cb *mei_cl_enqueue_ctrl_wr_cb(struct mei_cl *cl, size_t length,
        if (!cb)
                return NULL;

        list_add_tail(&cb->list, &cl->dev->ctrl_wr_list.list);
        list_add_tail(&cb->list, &cl->dev->ctrl_wr_list);
        return cb;
}

@ -503,27 +520,6 @@ struct mei_cl_cb *mei_cl_read_cb(const struct mei_cl *cl, const struct file *fp)
        return NULL;
}

/**
 * mei_cl_read_cb_flush - free client's read pending and completed cbs
 * for a specific file
 *
 * @cl: host client
 * @fp: file pointer (matching cb file object), may be NULL
 */
void mei_cl_read_cb_flush(const struct mei_cl *cl, const struct file *fp)
{
        struct mei_cl_cb *cb, *next;

        list_for_each_entry_safe(cb, next, &cl->rd_completed, list)
                if (!fp || fp == cb->fp)
                        mei_io_cb_free(cb);


        list_for_each_entry_safe(cb, next, &cl->rd_pending, list)
                if (!fp || fp == cb->fp)
                        mei_io_cb_free(cb);
}

/**
 * mei_cl_flush_queues - flushes queue lists belonging to cl.
 *
@ -542,18 +538,16 @@ int mei_cl_flush_queues(struct mei_cl *cl, const struct file *fp)
        dev = cl->dev;

        cl_dbg(dev, cl, "remove list entry belonging to cl\n");
        mei_io_list_free(&cl->dev->write_list, cl);
        mei_io_list_free(&cl->dev->write_waiting_list, cl);
        mei_io_list_flush(&cl->dev->ctrl_wr_list, cl);
        mei_io_list_flush(&cl->dev->ctrl_rd_list, cl);
        mei_io_list_flush(&cl->dev->amthif_cmd_list, cl);

        mei_cl_read_cb_flush(cl, fp);
        mei_io_list_free_cl(&cl->dev->write_list, cl);
        mei_io_list_free_cl(&cl->dev->write_waiting_list, cl);
        mei_io_list_flush_cl(&cl->dev->ctrl_wr_list, cl);
        mei_io_list_flush_cl(&cl->dev->ctrl_rd_list, cl);
        mei_io_list_free_fp(&cl->rd_pending, fp);
        mei_io_list_free_fp(&cl->rd_completed, fp);

        return 0;
}


/**
 * mei_cl_init - initializes cl.
 *
@ -756,7 +750,7 @@ static void mei_cl_wake_all(struct mei_cl *cl)
 *
 * @cl: host client
 */
void mei_cl_set_disconnected(struct mei_cl *cl)
static void mei_cl_set_disconnected(struct mei_cl *cl)
{
        struct mei_device *dev = cl->dev;

@ -765,15 +759,18 @@ void mei_cl_set_disconnected(struct mei_cl *cl)
        return;

        cl->state = MEI_FILE_DISCONNECTED;
        mei_io_list_free(&dev->write_list, cl);
        mei_io_list_free(&dev->write_waiting_list, cl);
        mei_io_list_flush(&dev->ctrl_rd_list, cl);
        mei_io_list_flush(&dev->ctrl_wr_list, cl);
        mei_io_list_free_cl(&dev->write_list, cl);
        mei_io_list_free_cl(&dev->write_waiting_list, cl);
        mei_io_list_flush_cl(&dev->ctrl_rd_list, cl);
        mei_io_list_flush_cl(&dev->ctrl_wr_list, cl);
        mei_io_list_free_cl(&dev->amthif_cmd_list, cl);
        mei_cl_wake_all(cl);
        cl->rx_flow_ctrl_creds = 0;
        cl->tx_flow_ctrl_creds = 0;
        cl->timer_count = 0;

        mei_cl_bus_module_put(cl);

        if (!cl->me_cl)
                return;

@ -829,7 +826,7 @@ static int mei_cl_send_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb)
        return ret;
}

        list_move_tail(&cb->list, &dev->ctrl_rd_list.list);
        list_move_tail(&cb->list, &dev->ctrl_rd_list);
        cl->timer_count = MEI_CONNECT_TIMEOUT;
        mei_schedule_stall_timer(dev);

@ -847,7 +844,7 @@ static int mei_cl_send_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb)
 * Return: 0, OK; otherwise, error.
 */
int mei_cl_irq_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb,
                          struct mei_cl_cb *cmpl_list)
                          struct list_head *cmpl_list)
{
        struct mei_device *dev = cl->dev;
        u32 msg_slots;
@ -862,7 +859,7 @@ int mei_cl_irq_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb,

        ret = mei_cl_send_disconnect(cl, cb);
        if (ret)
                list_move_tail(&cb->list, &cmpl_list->list);
                list_move_tail(&cb->list, cmpl_list);

        return ret;
}
@ -984,7 +981,7 @@ static bool mei_cl_is_other_connecting(struct mei_cl *cl)

        dev = cl->dev;

        list_for_each_entry(cb, &dev->ctrl_rd_list.list, list) {
        list_for_each_entry(cb, &dev->ctrl_rd_list, list) {
                if (cb->fop_type == MEI_FOP_CONNECT &&
                    mei_cl_me_id(cl) == mei_cl_me_id(cb->cl))
                        return true;
@ -1015,7 +1012,7 @@ static int mei_cl_send_connect(struct mei_cl *cl, struct mei_cl_cb *cb)
        return ret;
}

        list_move_tail(&cb->list, &dev->ctrl_rd_list.list);
        list_move_tail(&cb->list, &dev->ctrl_rd_list);
        cl->timer_count = MEI_CONNECT_TIMEOUT;
        mei_schedule_stall_timer(dev);
        return 0;
@ -1031,7 +1028,7 @@ static int mei_cl_send_connect(struct mei_cl *cl, struct mei_cl_cb *cb)
 * Return: 0, OK; otherwise, error.
 */
int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb,
                       struct mei_cl_cb *cmpl_list)
                       struct list_head *cmpl_list)
{
        struct mei_device *dev = cl->dev;
        u32 msg_slots;
@ -1049,7 +1046,7 @@ int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb,

        rets = mei_cl_send_connect(cl, cb);
        if (rets)
                list_move_tail(&cb->list, &cmpl_list->list);
                list_move_tail(&cb->list, cmpl_list);

        return rets;
}
@ -1077,13 +1074,17 @@ int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl,

        dev = cl->dev;

        if (!mei_cl_bus_module_get(cl))
                return -ENODEV;

        rets = mei_cl_set_connecting(cl, me_cl);
        if (rets)
                return rets;
                goto nortpm;

        if (mei_cl_is_fixed_address(cl)) {
                cl->state = MEI_FILE_CONNECTED;
                return 0;
                rets = 0;
                goto nortpm;
        }

        rets = pm_runtime_get(dev->dev);
@ -1117,8 +1118,8 @@ int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl,

        if (!mei_cl_is_connected(cl)) {
                if (cl->state == MEI_FILE_DISCONNECT_REQUIRED) {
                        mei_io_list_flush(&dev->ctrl_rd_list, cl);
                        mei_io_list_flush(&dev->ctrl_wr_list, cl);
                        mei_io_list_flush_cl(&dev->ctrl_rd_list, cl);
                        mei_io_list_flush_cl(&dev->ctrl_wr_list, cl);
                        /* ignore disconnect return valuue;
                         * in case of failure reset will be invoked
                         */
@ -1270,7 +1271,7 @@ enum mei_cb_file_ops mei_cl_notify_req2fop(u8 req)
 * Return: 0 on such and error otherwise.
 */
int mei_cl_irq_notify(struct mei_cl *cl, struct mei_cl_cb *cb,
                      struct mei_cl_cb *cmpl_list)
                      struct list_head *cmpl_list)
{
        struct mei_device *dev = cl->dev;
        u32 msg_slots;
@ -1288,11 +1289,11 @@ int mei_cl_irq_notify(struct mei_cl *cl, struct mei_cl_cb *cb,
        ret = mei_hbm_cl_notify_req(dev, cl, request);
        if (ret) {
                cl->status = ret;
                list_move_tail(&cb->list, &cmpl_list->list);
                list_move_tail(&cb->list, cmpl_list);
                return ret;
        }

        list_move_tail(&cb->list, &dev->ctrl_rd_list.list);
        list_move_tail(&cb->list, &dev->ctrl_rd_list);
        return 0;
}

@ -1325,6 +1326,9 @@ int mei_cl_notify_request(struct mei_cl *cl,
                return -EOPNOTSUPP;
        }

        if (!mei_cl_is_connected(cl))
                return -ENODEV;

        rets = pm_runtime_get(dev->dev);
        if (rets < 0 && rets != -EINPROGRESS) {
                pm_runtime_put_noidle(dev->dev);
@ -1344,7 +1348,7 @@ int mei_cl_notify_request(struct mei_cl *cl,
                rets = -ENODEV;
                goto out;
        }
        list_move_tail(&cb->list, &dev->ctrl_rd_list.list);
        list_move_tail(&cb->list, &dev->ctrl_rd_list);
        }

        mutex_unlock(&dev->device_lock);
@ -1419,6 +1423,11 @@ int mei_cl_notify_get(struct mei_cl *cl, bool block, bool *notify_ev)

        dev = cl->dev;

        if (!dev->hbm_f_ev_supported) {
                cl_dbg(dev, cl, "notifications not supported\n");
                return -EOPNOTSUPP;
        }

        if (!mei_cl_is_connected(cl))
                return -ENODEV;

@ -1519,7 +1528,7 @@ nortpm:
 * Return: 0, OK; otherwise error.
 */
int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
                     struct mei_cl_cb *cmpl_list)
                     struct list_head *cmpl_list)
{
        struct mei_device *dev;
        struct mei_msg_data *buf;
@ -1591,13 +1600,13 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
        }

        if (mei_hdr.msg_complete)
                list_move_tail(&cb->list, &dev->write_waiting_list.list);
                list_move_tail(&cb->list, &dev->write_waiting_list);

        return 0;

err:
        cl->status = rets;
        list_move_tail(&cb->list, &cmpl_list->list);
        list_move_tail(&cb->list, cmpl_list);
        return rets;
}

@ -1687,9 +1696,9 @@ int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb)

out:
        if (mei_hdr.msg_complete)
                list_add_tail(&cb->list, &dev->write_waiting_list.list);
                list_add_tail(&cb->list, &dev->write_waiting_list);
        else
                list_add_tail(&cb->list, &dev->write_list.list);
                list_add_tail(&cb->list, &dev->write_list);

        cb = NULL;
        if (blocking && cl->writing_state != MEI_WRITE_COMPLETE) {
@ -83,17 +83,7 @@ static inline u8 mei_me_cl_ver(const struct mei_me_client *me_cl)
 * MEI IO Functions
 */
void mei_io_cb_free(struct mei_cl_cb *priv_cb);

/**
 * mei_io_list_init - Sets up a queue list.
 *
 * @list: An instance cl callback structure
 */
static inline void mei_io_list_init(struct mei_cl_cb *list)
{
        INIT_LIST_HEAD(&list->list);
}
void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl);
void mei_io_list_free_fp(struct list_head *head, const struct file *fp);

/*
 * MEI Host Client Functions
@ -110,7 +100,6 @@ struct mei_cl *mei_cl_alloc_linked(struct mei_device *dev);

struct mei_cl_cb *mei_cl_read_cb(const struct mei_cl *cl,
                                 const struct file *fp);
void mei_cl_read_cb_flush(const struct mei_cl *cl, const struct file *fp);
struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length,
                                  enum mei_cb_file_ops type,
                                  const struct file *fp);
@ -209,19 +198,18 @@ static inline u8 mei_cl_host_addr(const struct mei_cl *cl)
}

int mei_cl_disconnect(struct mei_cl *cl);
void mei_cl_set_disconnected(struct mei_cl *cl);
int mei_cl_irq_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb,
                          struct mei_cl_cb *cmpl_list);
                          struct list_head *cmpl_list);
int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl,
                   const struct file *file);
int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb,
                       struct mei_cl_cb *cmpl_list);
                       struct list_head *cmpl_list);
int mei_cl_read_start(struct mei_cl *cl, size_t length, const struct file *fp);
int mei_cl_irq_read_msg(struct mei_cl *cl, struct mei_msg_hdr *hdr,
                        struct mei_cl_cb *cmpl_list);
                        struct list_head *cmpl_list);
int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb);
int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
                     struct mei_cl_cb *cmpl_list);
                     struct list_head *cmpl_list);

void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb);

@ -232,7 +220,7 @@ enum mei_cb_file_ops mei_cl_notify_req2fop(u8 request);
int mei_cl_notify_request(struct mei_cl *cl,
                          const struct file *file, u8 request);
int mei_cl_irq_notify(struct mei_cl *cl, struct mei_cl_cb *cb,
                      struct mei_cl_cb *cmpl_list);
                      struct list_head *cmpl_list);
int mei_cl_notify_get(struct mei_cl *cl, bool block, bool *notify_ev);
void mei_cl_notify(struct mei_cl *cl);
@ -815,7 +815,7 @@ static void mei_hbm_cl_res(struct mei_device *dev,
        struct mei_cl_cb *cb, *next;

        cl = NULL;
        list_for_each_entry_safe(cb, next, &dev->ctrl_rd_list.list, list) {
        list_for_each_entry_safe(cb, next, &dev->ctrl_rd_list, list) {

                cl = cb->cl;
@ -139,6 +139,19 @@ static inline void mei_hcsr_set(struct mei_device *dev, u32 reg)
|
||||
mei_hcsr_write(dev, reg);
|
||||
}
|
||||
|
||||
/**
|
||||
* mei_hcsr_set_hig - set host interrupt (set H_IG)
|
||||
*
|
||||
* @dev: the device structure
|
||||
*/
|
||||
static inline void mei_hcsr_set_hig(struct mei_device *dev)
|
||||
{
|
||||
u32 hcsr;
|
||||
|
||||
hcsr = mei_hcsr_read(dev) | H_IG;
|
||||
mei_hcsr_set(dev, hcsr);
|
||||
}
|
||||
|
||||
/**
|
||||
* mei_me_d0i3c_read - Reads 32bit data from the D0I3C register
|
||||
*
|
||||
@ -380,6 +393,19 @@ static bool mei_me_hw_is_ready(struct mei_device *dev)
|
||||
return (mecsr & ME_RDY_HRA) == ME_RDY_HRA;
|
||||
}
|
||||
|
||||
/**
|
||||
* mei_me_hw_is_resetting - check whether the me(hw) is in reset
|
||||
*
|
||||
* @dev: mei device
|
||||
* Return: bool
|
||||
*/
|
||||
static bool mei_me_hw_is_resetting(struct mei_device *dev)
|
||||
{
|
||||
u32 mecsr = mei_me_mecsr_read(dev);
|
||||
return (mecsr & ME_RST_HRA) == ME_RST_HRA;
}

/**
 * mei_me_hw_ready_wait - wait until the me(hw) has turned ready
 *  or timeout is reached
@@ -505,7 +531,6 @@ static int mei_me_hbuf_write(struct mei_device *dev,
unsigned long rem;
unsigned long length = header->length;
u32 *reg_buf = (u32 *)buf;
u32 hcsr;
u32 dw_cnt;
int i;
int empty_slots;
@@ -532,8 +557,7 @@ static int mei_me_hbuf_write(struct mei_device *dev,
mei_me_hcbww_write(dev, reg);
}

hcsr = mei_hcsr_read(dev) | H_IG;
mei_hcsr_set(dev, hcsr);
mei_hcsr_set_hig(dev);
if (!mei_me_hw_is_ready(dev))
return -EIO;

@@ -580,7 +604,6 @@ static int mei_me_read_slots(struct mei_device *dev, unsigned char *buffer,
unsigned long buffer_length)
{
u32 *reg_buf = (u32 *)buffer;
u32 hcsr;

for (; buffer_length >= sizeof(u32); buffer_length -= sizeof(u32))
*reg_buf++ = mei_me_mecbrw_read(dev);
@@ -591,8 +614,7 @@ static int mei_me_read_slots(struct mei_device *dev, unsigned char *buffer,
memcpy(reg_buf, &reg, buffer_length);
}

hcsr = mei_hcsr_read(dev) | H_IG;
mei_hcsr_set(dev, hcsr);
mei_hcsr_set_hig(dev);
return 0;
}

@@ -1189,7 +1211,7 @@ irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id)
irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
{
struct mei_device *dev = (struct mei_device *) dev_id;
struct mei_cl_cb complete_list;
struct list_head cmpl_list;
s32 slots;
u32 hcsr;
int rets = 0;
@@ -1201,7 +1223,7 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
hcsr = mei_hcsr_read(dev);
me_intr_clear(dev, hcsr);

mei_io_list_init(&complete_list);
INIT_LIST_HEAD(&cmpl_list);

/* check if ME wants a reset */
if (!mei_hw_is_ready(dev) && dev->dev_state != MEI_DEV_RESETTING) {
@@ -1210,6 +1232,9 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
goto end;
}

if (mei_me_hw_is_resetting(dev))
mei_hcsr_set_hig(dev);

mei_me_pg_intr(dev, me_intr_src(hcsr));

/* check if we need to start the dev */
@@ -1227,7 +1252,7 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
slots = mei_count_full_read_slots(dev);
while (slots > 0) {
dev_dbg(dev->dev, "slots to read = %08x\n", slots);
rets = mei_irq_read_handler(dev, &complete_list, &slots);
rets = mei_irq_read_handler(dev, &cmpl_list, &slots);
/* There is a race between ME write and interrupt delivery:
 * Not all data is always available immediately after the
 * interrupt, so try to read again on the next interrupt.
@@ -1252,11 +1277,11 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
 */
if (dev->pg_event != MEI_PG_EVENT_WAIT &&
dev->pg_event != MEI_PG_EVENT_RECEIVED) {
rets = mei_irq_write_handler(dev, &complete_list);
rets = mei_irq_write_handler(dev, &cmpl_list);
dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
}

mei_irq_compl_handler(dev, &complete_list);
mei_irq_compl_handler(dev, &cmpl_list);

end:
dev_dbg(dev->dev, "interrupt thread end ret = %d\n", rets);
@@ -1389,7 +1414,7 @@ const struct mei_cfg mei_me_pch8_sps_cfg = {
 * @pdev: The pci device structure
 * @cfg: per device generation config
 *
 * Return: The mei_device_device pointer on success, NULL on failure.
 * Return: The mei_device pointer on success, NULL on failure.
 */
struct mei_device *mei_me_dev_init(struct pci_dev *pdev,
const struct mei_cfg *cfg)
@@ -1397,8 +1422,8 @@ struct mei_device *mei_me_dev_init(struct pci_dev *pdev,
struct mei_device *dev;
struct mei_me_hw *hw;

dev = kzalloc(sizeof(struct mei_device) +
sizeof(struct mei_me_hw), GFP_KERNEL);
dev = devm_kzalloc(&pdev->dev, sizeof(struct mei_device) +
sizeof(struct mei_me_hw), GFP_KERNEL);
if (!dev)
return NULL;
hw = to_me_hw(dev);
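The last hunk above swaps a bare kzalloc() for devm_kzalloc(), so the combined device/hw allocation is released by the driver core when the PCI device is unbound and the kfree() error paths can go away. A minimal sketch of the pattern, with hypothetical foo_* names standing in for the mei types:

/*
 * Device-managed allocation of a generic struct plus a hw-specific
 * tail, as in the hunk above.  foo_device/foo_hw are placeholders.
 */
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/slab.h>

struct foo_hw { void __iomem *regs; };
struct foo_device { int minor; /* hw part is appended in the same allocation */ };

#define to_foo_hw(dev) ((struct foo_hw *)((char *)(dev) + sizeof(struct foo_device)))

static struct foo_device *foo_dev_init(struct pci_dev *pdev)
{
	struct foo_device *dev;

	/* freed automatically on unbind: no kfree() in any error path */
	dev = devm_kzalloc(&pdev->dev, sizeof(struct foo_device) +
			   sizeof(struct foo_hw), GFP_KERNEL);
	if (!dev)
		return NULL;

	return dev;
}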
@@ -1057,7 +1057,7 @@ irqreturn_t mei_txe_irq_thread_handler(int irq, void *dev_id)
{
struct mei_device *dev = (struct mei_device *) dev_id;
struct mei_txe_hw *hw = to_txe_hw(dev);
struct mei_cl_cb complete_list;
struct list_head cmpl_list;
s32 slots;
int rets = 0;

@@ -1069,7 +1069,7 @@ irqreturn_t mei_txe_irq_thread_handler(int irq, void *dev_id)

/* initialize our complete list */
mutex_lock(&dev->device_lock);
mei_io_list_init(&complete_list);
INIT_LIST_HEAD(&cmpl_list);

if (pci_dev_msi_enabled(to_pci_dev(dev->dev)))
mei_txe_check_and_ack_intrs(dev, true);
@@ -1126,7 +1126,7 @@ irqreturn_t mei_txe_irq_thread_handler(int irq, void *dev_id)
slots = mei_count_full_read_slots(dev);
if (test_and_clear_bit(TXE_INTR_OUT_DB_BIT, &hw->intr_cause)) {
/* Read from TXE */
rets = mei_irq_read_handler(dev, &complete_list, &slots);
rets = mei_irq_read_handler(dev, &cmpl_list, &slots);
if (rets && dev->dev_state != MEI_DEV_RESETTING) {
dev_err(dev->dev,
"mei_irq_read_handler ret = %d.\n", rets);
@@ -1144,14 +1144,14 @@ irqreturn_t mei_txe_irq_thread_handler(int irq, void *dev_id)
if (hw->aliveness && dev->hbuf_is_ready) {
/* get the real register value */
dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
rets = mei_irq_write_handler(dev, &complete_list);
rets = mei_irq_write_handler(dev, &cmpl_list);
if (rets && rets != -EMSGSIZE)
dev_err(dev->dev, "mei_irq_write_handler ret = %d.\n",
rets);
dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
}

mei_irq_compl_handler(dev, &complete_list);
mei_irq_compl_handler(dev, &cmpl_list);

end:
dev_dbg(dev->dev, "interrupt thread end ret = %d\n", rets);
@@ -1207,8 +1207,8 @@ struct mei_device *mei_txe_dev_init(struct pci_dev *pdev)
struct mei_device *dev;
struct mei_txe_hw *hw;

dev = kzalloc(sizeof(struct mei_device) +
sizeof(struct mei_txe_hw), GFP_KERNEL);
dev = devm_kzalloc(&pdev->dev, sizeof(struct mei_device) +
sizeof(struct mei_txe_hw), GFP_KERNEL);
if (!dev)
return NULL;
@@ -45,7 +45,7 @@
 * @intr_cause: translated interrupt cause
 */
struct mei_txe_hw {
void __iomem *mem_addr[NUM_OF_MEM_BARS];
void __iomem * const *mem_addr;
u32 aliveness;
u32 readiness;
u32 slots;
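The hunk above repoints mem_addr at the BAR table owned by the managed PCI layer, which is why the pointer becomes `void __iomem * const *`: the driver reads through the table but never frees or rewrites it. A short sketch of that pairing, with illustrative BAR numbers:

/*
 * Managed BAR mapping behind the mem_addr change: pcim_iomap_table()
 * returns a per-device table of __iomem pointers that the PCI core
 * tears down on unbind.  BIT(0) | BIT(1) is only an example mask.
 */
#include <linux/pci.h>

static int foo_map_bars(struct pci_dev *pdev, void __iomem * const **table)
{
	int err;

	err = pcim_iomap_regions(pdev, BIT(0) | BIT(1), "foo");
	if (err)
		return err;

	*table = pcim_iomap_table(pdev);	/* valid until device unbind */
	return 0;
}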
@@ -349,16 +349,16 @@ EXPORT_SYMBOL_GPL(mei_stop);
bool mei_write_is_idle(struct mei_device *dev)
{
bool idle = (dev->dev_state == MEI_DEV_ENABLED &&
list_empty(&dev->ctrl_wr_list.list) &&
list_empty(&dev->write_list.list) &&
list_empty(&dev->write_waiting_list.list));
list_empty(&dev->ctrl_wr_list) &&
list_empty(&dev->write_list) &&
list_empty(&dev->write_waiting_list));

dev_dbg(dev->dev, "write pg: is idle[%d] state=%s ctrl=%01d write=%01d wwait=%01d\n",
idle,
mei_dev_state_str(dev->dev_state),
list_empty(&dev->ctrl_wr_list.list),
list_empty(&dev->write_list.list),
list_empty(&dev->write_waiting_list.list));
list_empty(&dev->ctrl_wr_list),
list_empty(&dev->write_list),
list_empty(&dev->write_waiting_list));

return idle;
}
@@ -388,17 +388,17 @@ void mei_device_init(struct mei_device *dev,
dev->dev_state = MEI_DEV_INITIALIZING;
dev->reset_count = 0;

mei_io_list_init(&dev->write_list);
mei_io_list_init(&dev->write_waiting_list);
mei_io_list_init(&dev->ctrl_wr_list);
mei_io_list_init(&dev->ctrl_rd_list);
INIT_LIST_HEAD(&dev->write_list);
INIT_LIST_HEAD(&dev->write_waiting_list);
INIT_LIST_HEAD(&dev->ctrl_wr_list);
INIT_LIST_HEAD(&dev->ctrl_rd_list);

INIT_DELAYED_WORK(&dev->timer_work, mei_timer);
INIT_WORK(&dev->reset_work, mei_reset_work);
INIT_WORK(&dev->bus_rescan_work, mei_cl_bus_rescan_work);

INIT_LIST_HEAD(&dev->iamthif_cl.link);
mei_io_list_init(&dev->amthif_cmd_list);
INIT_LIST_HEAD(&dev->amthif_cmd_list);

bitmap_zero(dev->host_clients_map, MEI_CLIENTS_MAX);
dev->open_handle_count = 0;
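This hunk, together with the interrupt.c changes that follow, drops the mei_io_list_init() wrapper struct in favour of bare list_heads. A compact sketch of the plain list idiom the series converges on, with struct cb standing in for struct mei_cl_cb:

/*
 * Bare list_head usage matching the conversion above; the _safe
 * iterator is required because entries are moved mid-walk.
 */
#include <linux/list.h>

struct cb {
	struct list_head list;
	int status;
};

static void drain(struct list_head *pending, struct list_head *done)
{
	struct cb *cb, *next;

	/* list_for_each_entry_safe: current entry may leave the list */
	list_for_each_entry_safe(cb, next, pending, list)
		list_move_tail(&cb->list, done);
}

static void example(void)
{
	LIST_HEAD(pending);	/* on-stack heads, as in the irq handlers */
	LIST_HEAD(done);

	drain(&pending, &done);
}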
@@ -35,14 +35,14 @@
 * for the completed callbacks
 *
 * @dev: mei device
 * @compl_list: list of completed cbs
 * @cmpl_list: list of completed cbs
 */
void mei_irq_compl_handler(struct mei_device *dev, struct mei_cl_cb *compl_list)
void mei_irq_compl_handler(struct mei_device *dev, struct list_head *cmpl_list)
{
struct mei_cl_cb *cb, *next;
struct mei_cl *cl;

list_for_each_entry_safe(cb, next, &compl_list->list, list) {
list_for_each_entry_safe(cb, next, cmpl_list, list) {
cl = cb->cl;
list_del_init(&cb->list);

@@ -92,13 +92,13 @@ void mei_irq_discard_msg(struct mei_device *dev, struct mei_msg_hdr *hdr)
 *
 * @cl: reading client
 * @mei_hdr: header of mei client message
 * @complete_list: completion list
 * @cmpl_list: completion list
 *
 * Return: always 0
 */
int mei_cl_irq_read_msg(struct mei_cl *cl,
struct mei_msg_hdr *mei_hdr,
struct mei_cl_cb *complete_list)
struct list_head *cmpl_list)
{
struct mei_device *dev = cl->dev;
struct mei_cl_cb *cb;
@@ -144,7 +144,7 @@ int mei_cl_irq_read_msg(struct mei_cl *cl,

if (mei_hdr->msg_complete) {
cl_dbg(dev, cl, "completed read length = %zu\n", cb->buf_idx);
list_move_tail(&cb->list, &complete_list->list);
list_move_tail(&cb->list, cmpl_list);
} else {
pm_runtime_mark_last_busy(dev->dev);
pm_request_autosuspend(dev->dev);
@@ -154,7 +154,7 @@ int mei_cl_irq_read_msg(struct mei_cl *cl,

discard:
if (cb)
list_move_tail(&cb->list, &complete_list->list);
list_move_tail(&cb->list, cmpl_list);
mei_irq_discard_msg(dev, mei_hdr);
return 0;
}
@@ -169,7 +169,7 @@ discard:
 * Return: 0, OK; otherwise, error.
 */
static int mei_cl_irq_disconnect_rsp(struct mei_cl *cl, struct mei_cl_cb *cb,
struct mei_cl_cb *cmpl_list)
struct list_head *cmpl_list)
{
struct mei_device *dev = cl->dev;
u32 msg_slots;
@@ -183,7 +183,7 @@ static int mei_cl_irq_disconnect_rsp(struct mei_cl *cl, struct mei_cl_cb *cb,
return -EMSGSIZE;

ret = mei_hbm_cl_disconnect_rsp(dev, cl);
list_move_tail(&cb->list, &cmpl_list->list);
list_move_tail(&cb->list, cmpl_list);

return ret;
}
@@ -199,7 +199,7 @@ static int mei_cl_irq_disconnect_rsp(struct mei_cl *cl, struct mei_cl_cb *cb,
 * Return: 0, OK; otherwise, error.
 */
static int mei_cl_irq_read(struct mei_cl *cl, struct mei_cl_cb *cb,
struct mei_cl_cb *cmpl_list)
struct list_head *cmpl_list)
{
struct mei_device *dev = cl->dev;
u32 msg_slots;
@@ -219,7 +219,7 @@ static int mei_cl_irq_read(struct mei_cl *cl, struct mei_cl_cb *cb,
if (ret) {
cl->status = ret;
cb->buf_idx = 0;
list_move_tail(&cb->list, &cmpl_list->list);
list_move_tail(&cb->list, cmpl_list);
return ret;
}

@@ -249,7 +249,7 @@ static inline bool hdr_is_fixed(struct mei_msg_hdr *mei_hdr)
 * Return: 0 on success, <0 on failure.
 */
int mei_irq_read_handler(struct mei_device *dev,
struct mei_cl_cb *cmpl_list, s32 *slots)
struct list_head *cmpl_list, s32 *slots)
{
struct mei_msg_hdr *mei_hdr;
struct mei_cl *cl;
@@ -347,12 +347,11 @@ EXPORT_SYMBOL_GPL(mei_irq_read_handler);
 *
 * Return: 0 on success, <0 on failure.
 */
int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list)
int mei_irq_write_handler(struct mei_device *dev, struct list_head *cmpl_list)
{

struct mei_cl *cl;
struct mei_cl_cb *cb, *next;
struct mei_cl_cb *list;
s32 slots;
int ret;

@@ -367,19 +366,18 @@ int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list)
/* complete all waiting for write CB */
dev_dbg(dev->dev, "complete all waiting for write cb.\n");

list = &dev->write_waiting_list;
list_for_each_entry_safe(cb, next, &list->list, list) {
list_for_each_entry_safe(cb, next, &dev->write_waiting_list, list) {
cl = cb->cl;

cl->status = 0;
cl_dbg(dev, cl, "MEI WRITE COMPLETE\n");
cl->writing_state = MEI_WRITE_COMPLETE;
list_move_tail(&cb->list, &cmpl_list->list);
list_move_tail(&cb->list, cmpl_list);
}

/* complete control write list CB */
dev_dbg(dev->dev, "complete control write list cb.\n");
list_for_each_entry_safe(cb, next, &dev->ctrl_wr_list.list, list) {
list_for_each_entry_safe(cb, next, &dev->ctrl_wr_list, list) {
cl = cb->cl;
switch (cb->fop_type) {
case MEI_FOP_DISCONNECT:
@@ -423,7 +421,7 @@ int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list)
}
/* complete write list CB */
dev_dbg(dev->dev, "complete write list cb.\n");
list_for_each_entry_safe(cb, next, &dev->write_list.list, list) {
list_for_each_entry_safe(cb, next, &dev->write_list, list) {
cl = cb->cl;
if (cl == &dev->iamthif_cl)
ret = mei_amthif_irq_write(cl, cb, cmpl_list);
@@ -182,32 +182,36 @@ static ssize_t mei_read(struct file *file, char __user *ubuf,
goto out;
}

if (rets == -EBUSY &&
!mei_cl_enqueue_ctrl_wr_cb(cl, length, MEI_FOP_READ, file)) {
rets = -ENOMEM;

again:
mutex_unlock(&dev->device_lock);
if (wait_event_interruptible(cl->rx_wait,
!list_empty(&cl->rd_completed) ||
!mei_cl_is_connected(cl))) {
if (signal_pending(current))
return -EINTR;
return -ERESTARTSYS;
}
mutex_lock(&dev->device_lock);

if (!mei_cl_is_connected(cl)) {
rets = -ENODEV;
goto out;
}

do {
mutex_unlock(&dev->device_lock);
cb = mei_cl_read_cb(cl, file);
if (!cb) {
/*
 * For amthif all the waiters are woken up,
 * but only fp with matching cb->fp get the cb,
 * the others have to return to wait on read.
 */
if (cl == &dev->iamthif_cl)
goto again;

if (wait_event_interruptible(cl->rx_wait,
(!list_empty(&cl->rd_completed)) ||
(!mei_cl_is_connected(cl)))) {

if (signal_pending(current))
return -EINTR;
return -ERESTARTSYS;
}

mutex_lock(&dev->device_lock);
if (!mei_cl_is_connected(cl)) {
rets = -ENODEV;
goto out;
}

cb = mei_cl_read_cb(cl, file);
} while (!cb);
rets = 0;
goto out;
}

copy_buffer:
/* now copy the data to user space */
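The mei_read() rework above folds the sleep into a do/while so an amthif waiter that loses the race for the callback simply goes back to sleep instead of bailing out. A generic sketch of that unlock-wait-relock-recheck idiom, with foo_* stand-ins for the mei types (not the driver's actual API):

/*
 * Wait-retry read loop in the shape introduced above.  Called with
 * dev->lock held; returns with it held on 0, dropped on error, to
 * mirror the hunk.  All foo_* names are hypothetical.
 */
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/sched/signal.h>
#include <linux/wait.h>

struct foo_dev { struct mutex lock; };
struct foo_cl {
	wait_queue_head_t rx_wait;
	struct list_head rd_completed;
};

static bool cl_is_connected(struct foo_cl *cl) { return true; }
static struct foo_cl_cb *cl_read_cb(struct foo_cl *cl) { return NULL; }

static int read_wait(struct foo_cl *cl, struct foo_dev *dev)
{
	struct foo_cl_cb *cb;

	do {
		mutex_unlock(&dev->lock);

		if (wait_event_interruptible(cl->rx_wait,
					     !list_empty(&cl->rd_completed) ||
					     !cl_is_connected(cl)))
			return signal_pending(current) ? -EINTR : -ERESTARTSYS;

		mutex_lock(&dev->lock);
		if (!cl_is_connected(cl))
			return -ENODEV;

		cb = cl_read_cb(cl);	/* NULL if another waiter won the race */
	} while (!cb);

	return 0;
}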
@@ -328,6 +328,8 @@ ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length,
bool mei_cl_bus_rx_event(struct mei_cl *cl);
bool mei_cl_bus_notify_event(struct mei_cl *cl);
void mei_cl_bus_remove_devices(struct mei_device *bus);
bool mei_cl_bus_module_get(struct mei_cl *cl);
void mei_cl_bus_module_put(struct mei_cl *cl);
int mei_cl_bus_init(void);
void mei_cl_bus_exit(void);

@@ -439,10 +441,10 @@ struct mei_device {
struct cdev cdev;
int minor;

struct mei_cl_cb write_list;
struct mei_cl_cb write_waiting_list;
struct mei_cl_cb ctrl_wr_list;
struct mei_cl_cb ctrl_rd_list;
struct list_head write_list;
struct list_head write_waiting_list;
struct list_head ctrl_wr_list;
struct list_head ctrl_rd_list;

struct list_head file_list;
long open_handle_count;
@@ -499,7 +501,7 @@ struct mei_device {
bool override_fixed_address;

/* amthif list for cmd waiting */
struct mei_cl_cb amthif_cmd_list;
struct list_head amthif_cmd_list;
struct mei_cl iamthif_cl;
long iamthif_open_count;
u32 iamthif_stall_timer;
@@ -571,10 +573,10 @@ void mei_cancel_work(struct mei_device *dev);
void mei_timer(struct work_struct *work);
void mei_schedule_stall_timer(struct mei_device *dev);
int mei_irq_read_handler(struct mei_device *dev,
struct mei_cl_cb *cmpl_list, s32 *slots);
struct list_head *cmpl_list, s32 *slots);

int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list);
void mei_irq_compl_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list);
int mei_irq_write_handler(struct mei_device *dev, struct list_head *cmpl_list);
void mei_irq_compl_handler(struct mei_device *dev, struct list_head *cmpl_list);

/*
 * AMTHIF - AMT Host Interface Functions
@@ -590,12 +592,12 @@ int mei_amthif_release(struct mei_device *dev, struct file *file);
int mei_amthif_write(struct mei_cl *cl, struct mei_cl_cb *cb);
int mei_amthif_run_next_cmd(struct mei_device *dev);
int mei_amthif_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
struct mei_cl_cb *cmpl_list);
struct list_head *cmpl_list);

void mei_amthif_complete(struct mei_cl *cl, struct mei_cl_cb *cb);
int mei_amthif_irq_read_msg(struct mei_cl *cl,
struct mei_msg_hdr *mei_hdr,
struct mei_cl_cb *complete_list);
struct list_head *cmpl_list);
int mei_amthif_irq_read(struct mei_device *dev, s32 *slots);

/*
@@ -149,18 +149,18 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return -ENODEV;

/* enable pci dev */
err = pci_enable_device(pdev);
err = pcim_enable_device(pdev);
if (err) {
dev_err(&pdev->dev, "failed to enable pci device.\n");
goto end;
}
/* set PCI host mastering */
pci_set_master(pdev);
/* pci request regions for mei driver */
err = pci_request_regions(pdev, KBUILD_MODNAME);
/* pci request regions and mapping IO device memory for mei driver */
err = pcim_iomap_regions(pdev, BIT(0), KBUILD_MODNAME);
if (err) {
dev_err(&pdev->dev, "failed to get pci regions.\n");
goto disable_device;
goto end;
}

if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) ||
@@ -173,24 +173,18 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
if (err) {
dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
goto release_regions;
goto end;
}

/* allocates and initializes the mei dev structure */
dev = mei_me_dev_init(pdev, cfg);
if (!dev) {
err = -ENOMEM;
goto release_regions;
goto end;
}
hw = to_me_hw(dev);
/* mapping IO device memory */
hw->mem_addr = pci_iomap(pdev, 0, 0);
if (!hw->mem_addr) {
dev_err(&pdev->dev, "mapping I/O device memory failure.\n");
err = -ENOMEM;
goto free_device;
}
hw->mem_addr = pcim_iomap_table(pdev)[0];

pci_enable_msi(pdev);

/* request and enable interrupt */
@@ -203,7 +197,7 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err) {
dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n",
pdev->irq);
goto disable_msi;
goto end;
}

if (mei_start(dev)) {
@@ -242,15 +236,6 @@ release_irq:
mei_cancel_work(dev);
mei_disable_interrupts(dev);
free_irq(pdev->irq, dev);
disable_msi:
pci_disable_msi(pdev);
pci_iounmap(pdev, hw->mem_addr);
free_device:
kfree(dev);
release_regions:
pci_release_regions(pdev);
disable_device:
pci_disable_device(pdev);
end:
dev_err(&pdev->dev, "initialization failed.\n");
return err;
@@ -267,7 +252,6 @@ end:
static void mei_me_remove(struct pci_dev *pdev)
{
struct mei_device *dev;
struct mei_me_hw *hw;

dev = pci_get_drvdata(pdev);
if (!dev)
@@ -276,33 +260,19 @@ static void mei_me_remove(struct pci_dev *pdev)
if (mei_pg_is_enabled(dev))
pm_runtime_get_noresume(&pdev->dev);

hw = to_me_hw(dev);

dev_dbg(&pdev->dev, "stop\n");
mei_stop(dev);

if (!pci_dev_run_wake(pdev))
mei_me_unset_pm_domain(dev);

/* disable interrupts */
mei_disable_interrupts(dev);

free_irq(pdev->irq, dev);
pci_disable_msi(pdev);

if (hw->mem_addr)
pci_iounmap(pdev, hw->mem_addr);

mei_deregister(dev);

kfree(dev);

pci_release_regions(pdev);
pci_disable_device(pdev);

}

#ifdef CONFIG_PM_SLEEP
static int mei_me_pci_suspend(struct device *device)
{
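The probe conversion above (and the TXE one that follows) collapses the goto ladder because every pcim_/devm_ resource is released by the core on failure or unbind. A skeleton of the shape the driver ends up with, under hypothetical "foo" names:

/*
 * Managed PCI probe skeleton matching the conversion above: no
 * release_regions/disable_device unwinding, each error path just
 * returns.  BAR 0 and the 64-bit DMA mask are illustrative.
 */
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/pci.h>

static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	void __iomem *regs;
	int err;

	err = pcim_enable_device(pdev);
	if (err)
		return err;

	pci_set_master(pdev);

	err = pcim_iomap_regions(pdev, BIT(0), KBUILD_MODNAME);
	if (err)
		return err;		/* regions auto-released by the core */

	regs = pcim_iomap_table(pdev)[0];

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err)
		return err;

	(void)regs;			/* hardware bring-up would go here */
	return 0;
}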
@@ -52,17 +52,6 @@ static inline void mei_txe_set_pm_domain(struct mei_device *dev) {}
static inline void mei_txe_unset_pm_domain(struct mei_device *dev) {}
#endif /* CONFIG_PM */

static void mei_txe_pci_iounmap(struct pci_dev *pdev, struct mei_txe_hw *hw)
{
int i;

for (i = SEC_BAR; i < NUM_OF_MEM_BARS; i++) {
if (hw->mem_addr[i]) {
pci_iounmap(pdev, hw->mem_addr[i]);
hw->mem_addr[i] = NULL;
}
}
}
/**
 * mei_txe_probe - Device Initialization Routine
 *
@@ -75,22 +64,22 @@ static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct mei_device *dev;
struct mei_txe_hw *hw;
const int mask = BIT(SEC_BAR) | BIT(BRIDGE_BAR);
int err;
int i;

/* enable pci dev */
err = pci_enable_device(pdev);
err = pcim_enable_device(pdev);
if (err) {
dev_err(&pdev->dev, "failed to enable pci device.\n");
goto end;
}
/* set PCI host mastering */
pci_set_master(pdev);
/* pci request regions for mei driver */
err = pci_request_regions(pdev, KBUILD_MODNAME);
/* pci request regions and mapping IO device memory for mei driver */
err = pcim_iomap_regions(pdev, mask, KBUILD_MODNAME);
if (err) {
dev_err(&pdev->dev, "failed to get pci regions.\n");
goto disable_device;
goto end;
}

err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
@@ -98,7 +87,7 @@ static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
dev_err(&pdev->dev, "No suitable DMA available.\n");
goto release_regions;
goto end;
}
}

@@ -106,20 +95,10 @@ static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev = mei_txe_dev_init(pdev);
if (!dev) {
err = -ENOMEM;
goto release_regions;
goto end;
}
hw = to_txe_hw(dev);

/* mapping IO device memory */
for (i = SEC_BAR; i < NUM_OF_MEM_BARS; i++) {
hw->mem_addr[i] = pci_iomap(pdev, i, 0);
if (!hw->mem_addr[i]) {
dev_err(&pdev->dev, "mapping I/O device memory failure.\n");
err = -ENOMEM;
goto free_device;
}
}

hw->mem_addr = pcim_iomap_table(pdev);

pci_enable_msi(pdev);

@@ -140,7 +119,7 @@ static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err) {
dev_err(&pdev->dev, "mei: request_threaded_irq failure. irq = %d\n",
pdev->irq);
goto free_device;
goto end;
}

if (mei_start(dev)) {
@@ -173,23 +152,9 @@ static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
stop:
mei_stop(dev);
release_irq:

mei_cancel_work(dev);

/* disable interrupts */
mei_disable_interrupts(dev);

free_irq(pdev->irq, dev);
pci_disable_msi(pdev);

free_device:
mei_txe_pci_iounmap(pdev, hw);

kfree(dev);
release_regions:
pci_release_regions(pdev);
disable_device:
pci_disable_device(pdev);
end:
dev_err(&pdev->dev, "initialization failed.\n");
return err;
@@ -206,38 +171,24 @@ end:
static void mei_txe_remove(struct pci_dev *pdev)
{
struct mei_device *dev;
struct mei_txe_hw *hw;

dev = pci_get_drvdata(pdev);
if (!dev) {
dev_err(&pdev->dev, "mei: dev =NULL\n");
dev_err(&pdev->dev, "mei: dev == NULL\n");
return;
}

pm_runtime_get_noresume(&pdev->dev);

hw = to_txe_hw(dev);

mei_stop(dev);

if (!pci_dev_run_wake(pdev))
mei_txe_unset_pm_domain(dev);

/* disable interrupts */
mei_disable_interrupts(dev);
free_irq(pdev->irq, dev);
pci_disable_msi(pdev);

pci_set_drvdata(pdev, NULL);

mei_txe_pci_iounmap(pdev, hw);

mei_deregister(dev);

kfree(dev);

pci_release_regions(pdev);
pci_disable_device(pdev);
}

@@ -292,7 +292,6 @@ static int vop_virtio_add_device(struct vop_vdev *vdev,
if (ret) {
dev_err(vop_dev(vdev), "%s %d err %d\n",
__func__, __LINE__, ret);
kfree(vdev);
return ret;
}

@@ -56,6 +56,7 @@
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/workqueue.h>
#include <generated/utsrelease.h>

#include <linux/io.h>
@@ -64,8 +65,6 @@
#define LCD_MINOR 156
#define KEYPAD_MINOR 185

#define PANEL_VERSION "0.9.5"

#define LCD_MAXBYTES 256 /* max burst write */

#define KEYPAD_BUFFER 64
@@ -77,8 +76,8 @@
/* a key repeats this times INPUT_POLL_TIME */
#define KEYPAD_REP_DELAY (2)

/* keep the light on this times INPUT_POLL_TIME for each flash */
#define FLASH_LIGHT_TEMPO (200)
/* keep the light on this many seconds for each flash */
#define FLASH_LIGHT_TEMPO (4)

/* converts an r_str() input to an active high, bits string : 000BAOSE */
#define PNL_PINPUT(a) ((((unsigned char)(a)) ^ 0x7F) >> 3)
@@ -121,8 +120,6 @@
#define PIN_SELECP 17
#define PIN_NOT_SET 127

#define LCD_FLAG_S 0x0001
#define LCD_FLAG_ID 0x0002
#define LCD_FLAG_B 0x0004 /* blink on */
#define LCD_FLAG_C 0x0008 /* cursor on */
#define LCD_FLAG_D 0x0010 /* display on */
@@ -256,7 +253,10 @@ static struct {
int hwidth;
int charset;
int proto;
int light_tempo;

struct delayed_work bl_work;
struct mutex bl_tempo_lock; /* Protects access to bl_tempo */
bool bl_tempo;

/* TODO: use union here? */
struct {
@@ -661,8 +661,6 @@ static void lcd_get_bits(unsigned int port, int *val)
}
}

static void init_scan_timer(void);

/* sets data port bits according to current signals values */
static int set_data_bits(void)
{
@@ -794,11 +792,8 @@ static void lcd_send_serial(int byte)
}

/* turn the backlight on or off */
static void lcd_backlight(int on)
static void __lcd_backlight(int on)
{
if (lcd.pins.bl == PIN_NONE)
return;

/* The backlight is activated by setting the AUTOFEED line to +5V */
spin_lock_irq(&pprt_lock);
if (on)
@@ -809,6 +804,44 @@ static void lcd_backlight(int on)
spin_unlock_irq(&pprt_lock);
}

static void lcd_backlight(int on)
{
if (lcd.pins.bl == PIN_NONE)
return;

mutex_lock(&lcd.bl_tempo_lock);
if (!lcd.bl_tempo)
__lcd_backlight(on);
mutex_unlock(&lcd.bl_tempo_lock);
}

static void lcd_bl_off(struct work_struct *work)
{
mutex_lock(&lcd.bl_tempo_lock);
if (lcd.bl_tempo) {
lcd.bl_tempo = false;
if (!(lcd.flags & LCD_FLAG_L))
__lcd_backlight(0);
}
mutex_unlock(&lcd.bl_tempo_lock);
}

/* turn the backlight on for a little while */
static void lcd_poke(void)
{
if (lcd.pins.bl == PIN_NONE)
return;

cancel_delayed_work_sync(&lcd.bl_work);

mutex_lock(&lcd.bl_tempo_lock);
if (!lcd.bl_tempo && !(lcd.flags & LCD_FLAG_L))
__lcd_backlight(1);
lcd.bl_tempo = true;
schedule_delayed_work(&lcd.bl_work, FLASH_LIGHT_TEMPO * HZ);
mutex_unlock(&lcd.bl_tempo_lock);
}

/* send a command to the LCD panel in serial mode */
static void lcd_write_cmd_s(int cmd)
{
@@ -907,6 +940,13 @@ static void lcd_gotoxy(void)
(lcd.hwidth - 1) : lcd.bwidth - 1));
}

static void lcd_home(void)
{
lcd.addr.x = 0;
lcd.addr.y = 0;
lcd_gotoxy();
}

static void lcd_print(char c)
{
if (lcd.addr.x < lcd.bwidth) {
@@ -925,9 +965,7 @@ static void lcd_clear_fast_s(void)
{
int pos;

lcd.addr.x = 0;
lcd.addr.y = 0;
lcd_gotoxy();
lcd_home();

spin_lock_irq(&pprt_lock);
for (pos = 0; pos < lcd.height * lcd.hwidth; pos++) {
@@ -939,9 +977,7 @@ static void lcd_clear_fast_s(void)
}
spin_unlock_irq(&pprt_lock);

lcd.addr.x = 0;
lcd.addr.y = 0;
lcd_gotoxy();
lcd_home();
}

/* fills the display with spaces and resets X/Y */
@@ -949,9 +985,7 @@ static void lcd_clear_fast_p8(void)
{
int pos;

lcd.addr.x = 0;
lcd.addr.y = 0;
lcd_gotoxy();
lcd_home();

spin_lock_irq(&pprt_lock);
for (pos = 0; pos < lcd.height * lcd.hwidth; pos++) {
@@ -977,9 +1011,7 @@ static void lcd_clear_fast_p8(void)
}
spin_unlock_irq(&pprt_lock);

lcd.addr.x = 0;
lcd.addr.y = 0;
lcd_gotoxy();
lcd_home();
}

/* fills the display with spaces and resets X/Y */
@@ -987,9 +1019,7 @@ static void lcd_clear_fast_tilcd(void)
{
int pos;

lcd.addr.x = 0;
lcd.addr.y = 0;
lcd_gotoxy();
lcd_home();

spin_lock_irq(&pprt_lock);
for (pos = 0; pos < lcd.height * lcd.hwidth; pos++) {
@@ -1000,9 +1030,7 @@ static void lcd_clear_fast_tilcd(void)

spin_unlock_irq(&pprt_lock);

lcd.addr.x = 0;
lcd.addr.y = 0;
lcd_gotoxy();
lcd_home();
}

/* clears the display and resets X/Y */
@@ -1108,13 +1136,8 @@ static inline int handle_lcd_special_code(void)
processed = 1;
break;
case '*':
/* flash back light using the keypad timer */
if (scan_timer.function) {
if (lcd.light_tempo == 0 &&
((lcd.flags & LCD_FLAG_L) == 0))
lcd_backlight(1);
lcd.light_tempo = FLASH_LIGHT_TEMPO;
}
/* flash back light */
lcd_poke();
processed = 1;
break;
case 'f': /* Small Font */
@@ -1278,21 +1301,14 @@ static inline int handle_lcd_special_code(void)
lcd_write_cmd(LCD_CMD_FUNCTION_SET
| LCD_CMD_DATA_LEN_8BITS
| ((lcd.flags & LCD_FLAG_F)
? LCD_CMD_TWO_LINES : 0)
| ((lcd.flags & LCD_FLAG_N)
? LCD_CMD_FONT_5X10_DOTS
: 0)
| ((lcd.flags & LCD_FLAG_N)
? LCD_CMD_TWO_LINES
: 0));
/* check whether L flag was changed */
else if ((oldflags ^ lcd.flags) & (LCD_FLAG_L)) {
if (lcd.flags & (LCD_FLAG_L))
lcd_backlight(1);
else if (lcd.light_tempo == 0)
/*
 * switch off the light only when the tempo
 * lighting is gone
 */
lcd_backlight(0);
}
else if ((oldflags ^ lcd.flags) & (LCD_FLAG_L))
lcd_backlight(!!(lcd.flags & LCD_FLAG_L));
}

return processed;
@@ -1376,9 +1392,7 @@ static void lcd_write_char(char c)
processed = 1;
} else if (!strcmp(lcd.esc_seq.buf, "[H")) {
/* cursor to home */
lcd.addr.x = 0;
lcd.addr.y = 0;
lcd_gotoxy();
lcd_home();
processed = 1;
}
/* codes starting with ^[[L */
@@ -1625,8 +1639,10 @@ static void lcd_init(void)
else
lcd_char_conv = NULL;

if (lcd.pins.bl != PIN_NONE)
init_scan_timer();
if (lcd.pins.bl != PIN_NONE) {
mutex_init(&lcd.bl_tempo_lock);
INIT_DELAYED_WORK(&lcd.bl_work, lcd_bl_off);
}

pin_to_bits(lcd.pins.e, lcd_bits[LCD_PORT_D][LCD_BIT_E],
lcd_bits[LCD_PORT_C][LCD_BIT_E]);
@@ -1655,14 +1671,11 @@ static void lcd_init(void)
panel_lcd_print("\x1b[Lc\x1b[Lb\x1b[L*" CONFIG_PANEL_BOOT_MESSAGE);
#endif
#else
panel_lcd_print("\x1b[Lc\x1b[Lb\x1b[L*Linux-" UTS_RELEASE "\nPanel-"
PANEL_VERSION);
panel_lcd_print("\x1b[Lc\x1b[Lb\x1b[L*Linux-" UTS_RELEASE);
#endif
lcd.addr.x = 0;
lcd.addr.y = 0;
/* clear the display on the next device opening */
lcd.must_clear = true;
lcd_gotoxy();
lcd_home();
}

/*
@@ -1997,19 +2010,8 @@ static void panel_scan_timer(void)
panel_process_inputs();
}

if (lcd.enabled && lcd.initialized) {
if (keypressed) {
if (lcd.light_tempo == 0 &&
((lcd.flags & LCD_FLAG_L) == 0))
lcd_backlight(1);
lcd.light_tempo = FLASH_LIGHT_TEMPO;
} else if (lcd.light_tempo > 0) {
lcd.light_tempo--;
if (lcd.light_tempo == 0 &&
((lcd.flags & LCD_FLAG_L) == 0))
lcd_backlight(0);
}
}
if (keypressed && lcd.enabled && lcd.initialized)
lcd_poke();

mod_timer(&scan_timer, jiffies + INPUT_POLL_TIME);
}
@@ -2270,25 +2272,26 @@ static void panel_detach(struct parport *port)
if (scan_timer.function)
del_timer_sync(&scan_timer);

if (pprt) {
if (keypad.enabled) {
misc_deregister(&keypad_dev);
keypad_initialized = 0;
}

if (lcd.enabled) {
panel_lcd_print("\x0cLCD driver " PANEL_VERSION
"\nunloaded.\x1b[Lc\x1b[Lb\x1b[L-");
misc_deregister(&lcd_dev);
lcd.initialized = false;
}

/* TODO: free all input signals */
parport_release(pprt);
parport_unregister_device(pprt);
pprt = NULL;
unregister_reboot_notifier(&panel_notifier);
if (keypad.enabled) {
misc_deregister(&keypad_dev);
keypad_initialized = 0;
}

if (lcd.enabled) {
panel_lcd_print("\x0cLCD driver unloaded.\x1b[Lc\x1b[Lb\x1b[L-");
misc_deregister(&lcd_dev);
if (lcd.pins.bl != PIN_NONE) {
cancel_delayed_work_sync(&lcd.bl_work);
__lcd_backlight(0);
}
lcd.initialized = false;
}

/* TODO: free all input signals */
parport_release(pprt);
parport_unregister_device(pprt);
pprt = NULL;
unregister_reboot_notifier(&panel_notifier);
}

static struct parport_driver panel_driver = {
@@ -2400,7 +2403,7 @@ static int __init panel_init_module(void)

if (!lcd.enabled && !keypad.enabled) {
/* no device enabled, let's exit */
pr_err("driver version " PANEL_VERSION " disabled.\n");
pr_err("panel driver disabled.\n");
return -ENODEV;
}

@@ -2411,12 +2414,10 @@ static int __init panel_init_module(void)
}

if (pprt)
pr_info("driver version " PANEL_VERSION
" registered on parport%d (io=0x%lx).\n", parport,
pprt->port->base);
pr_info("panel driver registered on parport%d (io=0x%lx).\n",
parport, pprt->port->base);
else
pr_info("driver version " PANEL_VERSION
" not yet registered\n");
pr_info("panel driver not yet registered\n");
return 0;
}

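The panel rework above replaces the light_tempo countdown, which was decremented on every keypad scan-timer tick, with a delayed work item serialized by a mutex. A condensed sketch of that flash pattern with the hardware poke stubbed out:

/*
 * Delayed-work backlight flash in the shape of the hunks above: each
 * poke (re)arms a work item that switches the light off FLASH_SECS
 * seconds after the last event.  hw_backlight() is a stub.
 */
#include <linux/jiffies.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>

#define FLASH_SECS 4

static DEFINE_MUTEX(bl_lock);
static bool bl_tempo;
static struct delayed_work bl_work;

static void hw_backlight(int on) { /* drive the parallel port here */ }

static void bl_off(struct work_struct *work)
{
	mutex_lock(&bl_lock);
	if (bl_tempo) {
		bl_tempo = false;
		hw_backlight(0);
	}
	mutex_unlock(&bl_lock);
}

static void bl_init(void)
{
	INIT_DELAYED_WORK(&bl_work, bl_off);
}

static void bl_poke(void)
{
	/* flush a pending off before rearming, as lcd_poke() does */
	cancel_delayed_work_sync(&bl_work);

	mutex_lock(&bl_lock);
	if (!bl_tempo)
		hw_backlight(1);
	bl_tempo = true;
	schedule_delayed_work(&bl_work, FLASH_SECS * HZ);
	mutex_unlock(&bl_lock);
}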
drivers/misc/sram-exec.c (new file, 105 lines)
@@ -0,0 +1,105 @@
/*
 * SRAM protect-exec region helper functions
 *
 * Copyright (C) 2017 Texas Instruments Incorporated - http://www.ti.com/
 * Dave Gerlach
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/device.h>
#include <linux/genalloc.h>
#include <linux/sram.h>

#include <asm/cacheflush.h>

#include "sram.h"

static DEFINE_MUTEX(exec_pool_list_mutex);
static LIST_HEAD(exec_pool_list);

int sram_check_protect_exec(struct sram_dev *sram, struct sram_reserve *block,
struct sram_partition *part)
{
unsigned long base = (unsigned long)part->base;
unsigned long end = base + block->size;

if (!PAGE_ALIGNED(base) || !PAGE_ALIGNED(end)) {
dev_err(sram->dev,
"SRAM pool marked with 'protect-exec' is not page aligned and will not be created.\n");
return -ENOMEM;
}

return 0;
}

int sram_add_protect_exec(struct sram_partition *part)
{
mutex_lock(&exec_pool_list_mutex);
list_add_tail(&part->list, &exec_pool_list);
mutex_unlock(&exec_pool_list_mutex);

return 0;
}

/**
 * sram_exec_copy - copy data to a protected executable region of sram
 *
 * @pool: struct gen_pool retrieved that is part of this sram
 * @dst: Destination address for the copy, that must be inside pool
 * @src: Source address for the data to copy
 * @size: Size of copy to perform, which starting from dst, must reside in pool
 *
 * This helper function allows sram driver to act as central control location
 * of 'protect-exec' pools which are normal sram pools but are always set
 * read-only and executable except when copying data to them, at which point
 * they are set to read-write non-executable, to make sure no memory is
 * writeable and executable at the same time. This region must be page-aligned
 * and is checked during probe, otherwise page attribute manipulation would
 * not be possible.
 */
int sram_exec_copy(struct gen_pool *pool, void *dst, void *src,
size_t size)
{
struct sram_partition *part = NULL, *p;
unsigned long base;
int pages;

mutex_lock(&exec_pool_list_mutex);
list_for_each_entry(p, &exec_pool_list, list) {
if (p->pool == pool)
part = p;
}
mutex_unlock(&exec_pool_list_mutex);

if (!part)
return -EINVAL;

if (!addr_in_gen_pool(pool, (unsigned long)dst, size))
return -EINVAL;

base = (unsigned long)part->base;
pages = PAGE_ALIGN(size) / PAGE_SIZE;

mutex_lock(&part->lock);

set_memory_nx((unsigned long)base, pages);
set_memory_rw((unsigned long)base, pages);

memcpy(dst, src, size);

set_memory_ro((unsigned long)base, pages);
set_memory_x((unsigned long)base, pages);

mutex_unlock(&part->lock);

return 0;
}
EXPORT_SYMBOL_GPL(sram_exec_copy);

@@ -31,36 +31,10 @@
#include <linux/mfd/syscon.h>
#include <soc/at91/atmel-secumod.h>

#include "sram.h"

#define SRAM_GRANULARITY 32

struct sram_partition {
void __iomem *base;

struct gen_pool *pool;
struct bin_attribute battr;
struct mutex lock;
};

struct sram_dev {
struct device *dev;
void __iomem *virt_base;

struct gen_pool *pool;
struct clk *clk;

struct sram_partition *partition;
u32 partitions;
};

struct sram_reserve {
struct list_head list;
u32 start;
u32 size;
bool export;
bool pool;
const char *label;
};

static ssize_t sram_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t pos, size_t count)
@@ -148,6 +122,18 @@ static int sram_add_partition(struct sram_dev *sram, struct sram_reserve *block,
if (ret)
return ret;
}
if (block->protect_exec) {
ret = sram_check_protect_exec(sram, block, part);
if (ret)
return ret;

ret = sram_add_pool(sram, block, start, part);
if (ret)
return ret;

sram_add_protect_exec(part);
}

sram->partitions++;

return 0;
@@ -233,7 +219,11 @@ static int sram_reserve_regions(struct sram_dev *sram, struct resource *res)
if (of_find_property(child, "pool", NULL))
block->pool = true;

if ((block->export || block->pool) && block->size) {
if (of_find_property(child, "protect-exec", NULL))
block->protect_exec = true;

if ((block->export || block->pool || block->protect_exec) &&
block->size) {
exports++;

label = NULL;
@@ -249,8 +239,10 @@ static int sram_reserve_regions(struct sram_dev *sram, struct resource *res)

block->label = devm_kstrdup(sram->dev,
label, GFP_KERNEL);
if (!block->label)
if (!block->label) {
ret = -ENOMEM;
goto err_chunks;
}

dev_dbg(sram->dev, "found %sblock '%s' 0x%x-0x%x\n",
block->export ? "exported " : "", block->label,
@@ -293,7 +285,8 @@ static int sram_reserve_regions(struct sram_dev *sram, struct resource *res)
goto err_chunks;
}

if ((block->export || block->pool) && block->size) {
if ((block->export || block->pool || block->protect_exec) &&
block->size) {
ret = sram_add_partition(sram, block,
res->start + block->start);
if (ret) {
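The new sram-exec helper above is intended to be called by a platform driver that has obtained a gen_pool backed by a 'protect-exec' partition; the pool pages are flipped to rw/nx only for the duration of the copy, then back to ro/x. A hypothetical caller sketch, where the "sram" phandle name and the blob symbols are assumptions rather than part of the patch:

/*
 * Hypothetical consumer of sram_exec_copy(): allocate from a
 * protect-exec pool and install a code blob into it.
 */
#include <linux/genalloc.h>
#include <linux/of.h>
#include <linux/sram.h>

extern const u8 foo_blob_start[], foo_blob_end[];	/* assumed symbols */

static void *foo_load(struct device_node *np)
{
	struct gen_pool *pool;
	size_t size = foo_blob_end - foo_blob_start;
	unsigned long dst;

	pool = of_gen_pool_get(np, "sram", 0);	/* pool created by sram.c */
	if (!pool)
		return NULL;

	dst = gen_pool_alloc(pool, size);
	if (!dst)
		return NULL;

	/* helper rejects destinations outside the registered pool */
	if (sram_exec_copy(pool, (void *)dst, (void *)foo_blob_start, size)) {
		gen_pool_free(pool, dst, size);
		return NULL;
	}

	return (void *)dst;
}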
drivers/misc/sram.h (new file, 58 lines)
@@ -0,0 +1,58 @@
/*
 * Defines for the SRAM driver
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __SRAM_H
#define __SRAM_H

struct sram_partition {
void __iomem *base;

struct gen_pool *pool;
struct bin_attribute battr;
struct mutex lock;
struct list_head list;
};

struct sram_dev {
struct device *dev;
void __iomem *virt_base;

struct gen_pool *pool;
struct clk *clk;

struct sram_partition *partition;
u32 partitions;
};

struct sram_reserve {
struct list_head list;
u32 start;
u32 size;
bool export;
bool pool;
bool protect_exec;
const char *label;
};

#ifdef CONFIG_SRAM_EXEC
int sram_check_protect_exec(struct sram_dev *sram, struct sram_reserve *block,
struct sram_partition *part);
int sram_add_protect_exec(struct sram_partition *part);
#else
static inline int sram_check_protect_exec(struct sram_dev *sram,
struct sram_reserve *block,
struct sram_partition *part)
{
return -ENODEV;
}

static inline int sram_add_protect_exec(struct sram_partition *part)
{
return -ENODEV;
}
#endif /* CONFIG_SRAM_EXEC */
#endif /* __SRAM_H */
@@ -54,10 +54,7 @@ struct vmci_guest_device {
struct device *dev; /* PCI device we are attached to */
void __iomem *iobase;

unsigned int irq;
unsigned int intr_type;
bool exclusive_vectors;
struct msix_entry msix_entries[VMCI_MAX_INTRS];

struct tasklet_struct datagram_tasklet;
struct tasklet_struct bm_tasklet;
@@ -368,30 +365,6 @@ static void vmci_process_bitmap(unsigned long data)
vmci_dbell_scan_notification_entries(dev->notification_bitmap);
}

/*
 * Enable MSI-X. Try exclusive vectors first, then shared vectors.
 */
static int vmci_enable_msix(struct pci_dev *pdev,
struct vmci_guest_device *vmci_dev)
{
int i;
int result;

for (i = 0; i < VMCI_MAX_INTRS; ++i) {
vmci_dev->msix_entries[i].entry = i;
vmci_dev->msix_entries[i].vector = i;
}

result = pci_enable_msix_exact(pdev,
vmci_dev->msix_entries, VMCI_MAX_INTRS);
if (result == 0)
vmci_dev->exclusive_vectors = true;
else if (result == -ENOSPC)
result = pci_enable_msix_exact(pdev, vmci_dev->msix_entries, 1);

return result;
}

/*
 * Interrupt handler for legacy or MSI interrupt, or for first MSI-X
 * interrupt (vector VMCI_INTR_DATAGRAM).
@@ -406,7 +379,7 @@ static irqreturn_t vmci_interrupt(int irq, void *_dev)
 * Otherwise we must read the ICR to determine what to do.
 */

if (dev->intr_type == VMCI_INTR_TYPE_MSIX && dev->exclusive_vectors) {
if (dev->exclusive_vectors) {
tasklet_schedule(&dev->datagram_tasklet);
} else {
unsigned int icr;
@@ -491,7 +464,6 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
}

vmci_dev->dev = &pdev->dev;
vmci_dev->intr_type = VMCI_INTR_TYPE_INTX;
vmci_dev->exclusive_vectors = false;
vmci_dev->iobase = iobase;

@@ -592,26 +564,26 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
 * Enable interrupts. Try MSI-X first, then MSI, and then fallback on
 * legacy interrupts.
 */
if (!vmci_disable_msix && !vmci_enable_msix(pdev, vmci_dev)) {
vmci_dev->intr_type = VMCI_INTR_TYPE_MSIX;
vmci_dev->irq = vmci_dev->msix_entries[0].vector;
} else if (!vmci_disable_msi && !pci_enable_msi(pdev)) {
vmci_dev->intr_type = VMCI_INTR_TYPE_MSI;
vmci_dev->irq = pdev->irq;
error = pci_alloc_irq_vectors(pdev, VMCI_MAX_INTRS, VMCI_MAX_INTRS,
PCI_IRQ_MSIX);
if (error) {
error = pci_alloc_irq_vectors(pdev, 1, 1,
PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY);
if (error)
goto err_remove_bitmap;
} else {
vmci_dev->intr_type = VMCI_INTR_TYPE_INTX;
vmci_dev->irq = pdev->irq;
vmci_dev->exclusive_vectors = true;
}

/*
 * Request IRQ for legacy or MSI interrupts, or for first
 * MSI-X vector.
 */
error = request_irq(vmci_dev->irq, vmci_interrupt, IRQF_SHARED,
KBUILD_MODNAME, vmci_dev);
error = request_irq(pci_irq_vector(pdev, 0), vmci_interrupt,
IRQF_SHARED, KBUILD_MODNAME, vmci_dev);
if (error) {
dev_err(&pdev->dev, "Irq %u in use: %d\n",
vmci_dev->irq, error);
pci_irq_vector(pdev, 0), error);
goto err_disable_msi;
}

@@ -622,13 +594,13 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
 * between the vectors.
 */
if (vmci_dev->exclusive_vectors) {
error = request_irq(vmci_dev->msix_entries[1].vector,
error = request_irq(pci_irq_vector(pdev, 1),
vmci_interrupt_bm, 0, KBUILD_MODNAME,
vmci_dev);
if (error) {
dev_err(&pdev->dev,
"Failed to allocate irq %u: %d\n",
vmci_dev->msix_entries[1].vector, error);
pci_irq_vector(pdev, 1), error);
goto err_free_irq;
}
}
@@ -651,15 +623,12 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
return 0;

err_free_irq:
free_irq(vmci_dev->irq, vmci_dev);
free_irq(pci_irq_vector(pdev, 0), vmci_dev);
tasklet_kill(&vmci_dev->datagram_tasklet);
tasklet_kill(&vmci_dev->bm_tasklet);

err_disable_msi:
if (vmci_dev->intr_type == VMCI_INTR_TYPE_MSIX)
pci_disable_msix(pdev);
else if (vmci_dev->intr_type == VMCI_INTR_TYPE_MSI)
pci_disable_msi(pdev);
pci_free_irq_vectors(pdev);

vmci_err = vmci_event_unsubscribe(ctx_update_sub_id);
if (vmci_err < VMCI_SUCCESS)
@@ -719,14 +688,10 @@ static void vmci_guest_remove_device(struct pci_dev *pdev)
 * MSI-X, we might have multiple vectors, each with their own
 * IRQ, which we must free too.
 */
free_irq(vmci_dev->irq, vmci_dev);
if (vmci_dev->intr_type == VMCI_INTR_TYPE_MSIX) {
if (vmci_dev->exclusive_vectors)
free_irq(vmci_dev->msix_entries[1].vector, vmci_dev);
pci_disable_msix(pdev);
} else if (vmci_dev->intr_type == VMCI_INTR_TYPE_MSI) {
pci_disable_msi(pdev);
}
if (vmci_dev->exclusive_vectors)
free_irq(pci_irq_vector(pdev, 1), vmci_dev);
free_irq(pci_irq_vector(pdev, 0), vmci_dev);
pci_free_irq_vectors(pdev);

tasklet_kill(&vmci_dev->datagram_tasklet);
tasklet_kill(&vmci_dev->bm_tasklet);

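The vmci conversion above retires the hand-rolled msix_entry bookkeeping in favour of pci_alloc_irq_vectors(), so the driver no longer tracks an intr_type and addresses vectors only through pci_irq_vector(). A stripped-down sketch of that setup sequence under placeholder names:

/*
 * pci_alloc_irq_vectors() fallback pattern as in the hunks above:
 * ask for the full MSI-X count, fall back to one vector of any type.
 */
#include <linux/interrupt.h>
#include <linux/pci.h>

#define FOO_MAX_INTRS 3

static irqreturn_t foo_irq(int irq, void *data) { return IRQ_HANDLED; }

static int foo_setup_irqs(struct pci_dev *pdev, void *drvdata, bool *exclusive)
{
	int err;

	err = pci_alloc_irq_vectors(pdev, FOO_MAX_INTRS, FOO_MAX_INTRS,
				    PCI_IRQ_MSIX);
	if (err < 0) {
		err = pci_alloc_irq_vectors(pdev, 1, 1,
				PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY);
		if (err < 0)
			return err;
		*exclusive = false;	/* one shared vector */
	} else {
		*exclusive = true;	/* one vector per interrupt source */
	}

	err = request_irq(pci_irq_vector(pdev, 0), foo_irq, IRQF_SHARED,
			  "foo", drvdata);
	if (err)
		pci_free_irq_vectors(pdev);
	return err;
}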
@@ -703,8 +703,6 @@ static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
char *dest = start + (section_index * net_device->send_section_size)
+ pend_size;
int i;
bool is_data_pkt = (skb != NULL) ? true : false;
bool xmit_more = (skb != NULL) ? skb->xmit_more : false;
u32 msg_size = 0;
u32 padding = 0;
u32 remain = packet->total_data_buflen % net_device->pkt_align;
@@ -712,7 +710,7 @@ static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
packet->page_buf_cnt;

/* Add padding */
if (is_data_pkt && xmit_more && remain &&
if (skb && skb->xmit_more && remain &&
!packet->cp_partial) {
padding = net_device->pkt_align - remain;
rndis_msg->msg_len += padding;
@@ -754,7 +752,6 @@ static inline int netvsc_send_pkt(
int ret;
struct hv_page_buffer *pgbuf;
u32 ring_avail = hv_ringbuf_avail_percent(&out_channel->outbound);
bool xmit_more = (skb != NULL) ? skb->xmit_more : false;

nvmsg.hdr.msg_type = NVSP_MSG1_TYPE_SEND_RNDIS_PKT;
if (skb != NULL) {
@@ -778,16 +775,6 @@ static inline int netvsc_send_pkt(
if (out_channel->rescind)
return -ENODEV;

/*
 * It is possible that once we successfully place this packet
 * on the ringbuffer, we may stop the queue. In that case, we want
 * to notify the host independent of the xmit_more flag. We don't
 * need to be precise here; in the worst case we may signal the host
 * unnecessarily.
 */
if (ring_avail < (RING_AVAIL_PERCENT_LOWATER + 1))
xmit_more = false;

if (packet->page_buf_cnt) {
pgbuf = packet->cp_partial ? (*pb) +
packet->rmsg_pgcnt : (*pb);
@@ -797,15 +784,13 @@ static inline int netvsc_send_pkt(
&nvmsg,
sizeof(struct nvsp_message),
req_id,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED,
!xmit_more);
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
} else {
ret = vmbus_sendpacket_ctl(out_channel, &nvmsg,
sizeof(struct nvsp_message),
req_id,
VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED,
!xmit_more);
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
}

if (ret == 0) {
@@ -608,7 +608,7 @@ static struct nvmem_device *nvmem_find(const char *name)
/**
 * of_nvmem_device_get() - Get nvmem device from a given id
 *
 * @dev node: Device tree node that uses the nvmem device
 * @np: Device tree node that uses the nvmem device.
 * @id: nvmem name from nvmem-names property.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
@@ -634,8 +634,8 @@ EXPORT_SYMBOL_GPL(of_nvmem_device_get);
/**
 * nvmem_device_get() - Get nvmem device from a given id
 *
 * @dev : Device that uses the nvmem device
 * @id: nvmem name from nvmem-names property.
 * @dev: Device that uses the nvmem device.
 * @dev_name: name of the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
@@ -674,6 +674,7 @@ static void devm_nvmem_device_release(struct device *dev, void *res)
/**
 * devm_nvmem_device_put() - put alredy got nvmem device
 *
 * @dev: Device that uses the nvmem device.
 * @nvmem: pointer to nvmem device allocated by devm_nvmem_cell_get(),
 * that needs to be released.
 */
@@ -702,8 +703,8 @@ EXPORT_SYMBOL_GPL(nvmem_device_put);
/**
 * devm_nvmem_device_get() - Get nvmem cell of device form a given id
 *
 * @dev node: Device tree node that uses the nvmem cell
 * @id: nvmem name in nvmems property.
 * @dev: Device that requests the nvmem device.
 * @id: name id for the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_cell
 * on success. The nvmem_cell will be freed by the automatically once the
@@ -745,8 +746,10 @@ static struct nvmem_cell *nvmem_cell_get_from_list(const char *cell_id)
/**
 * of_nvmem_cell_get() - Get a nvmem cell from given device node and cell id
 *
 * @dev node: Device tree node that uses the nvmem cell
 * @id: nvmem cell name from nvmem-cell-names property.
 * @np: Device tree node that uses the nvmem cell.
 * @name: nvmem cell name from nvmem-cell-names property, or NULL
 * for the cell at index 0 (the lone cell with no accompanying
 * nvmem-cell-names property).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell will be freed by the
@@ -759,9 +762,12 @@ struct nvmem_cell *of_nvmem_cell_get(struct device_node *np,
struct nvmem_cell *cell;
struct nvmem_device *nvmem;
const __be32 *addr;
int rval, len, index;
int rval, len;
int index = 0;

index = of_property_match_string(np, "nvmem-cell-names", name);
/* if cell name exists, find index to the name */
if (name)
index = of_property_match_string(np, "nvmem-cell-names", name);

cell_np = of_parse_phandle(np, "nvmem-cells", index);
if (!cell_np)
@@ -830,8 +836,8 @@ EXPORT_SYMBOL_GPL(of_nvmem_cell_get);
/**
 * nvmem_cell_get() - Get nvmem cell of device form a given cell name
 *
 * @dev node: Device tree node that uses the nvmem cell
 * @id: nvmem cell name to get.
 * @dev: Device that requests the nvmem cell.
 * @cell_id: nvmem cell name to get.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell will be freed by the
@@ -859,8 +865,8 @@ static void devm_nvmem_cell_release(struct device *dev, void *res)
/**
 * devm_nvmem_cell_get() - Get nvmem cell of device form a given id
 *
 * @dev node: Device tree node that uses the nvmem cell
 * @id: nvmem id in nvmem-names property.
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name id to get.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell will be freed by the
@@ -900,7 +906,8 @@ static int devm_nvmem_cell_match(struct device *dev, void *res, void *data)
 * devm_nvmem_cell_put() - Release previously allocated nvmem cell
 * from devm_nvmem_cell_get.
 *
 * @cell: Previously allocated nvmem cell by devm_nvmem_cell_get()
 * @dev: Device that requests the nvmem cell.
 * @cell: Previously allocated nvmem cell by devm_nvmem_cell_get().
 */
void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell)
{
@@ -916,7 +923,7 @@ EXPORT_SYMBOL(devm_nvmem_cell_put);
/**
 * nvmem_cell_put() - Release previously allocated nvmem cell.
 *
 * @cell: Previously allocated nvmem cell by nvmem_cell_get()
 * @cell: Previously allocated nvmem cell by nvmem_cell_get().
 */
void nvmem_cell_put(struct nvmem_cell *cell)
{
@@ -970,7 +977,8 @@ static int __nvmem_cell_read(struct nvmem_device *nvmem,
if (cell->bit_offset || cell->nbits)
nvmem_shift_read_buffer_in_place(cell, buf);

*len = cell->bytes;
if (len)
*len = cell->bytes;

return 0;
}
@@ -979,7 +987,8 @@ static int __nvmem_cell_read(struct nvmem_device *nvmem,
 * nvmem_cell_read() - Read a given nvmem cell
 *
 * @cell: nvmem cell to be read.
 * @len: pointer to length of cell which will be populated on successful read.
 * @len: pointer to length of cell which will be populated on successful read;
 * can be NULL.
 *
 * Return: ERR_PTR() on error or a valid pointer to a buffer on success. The
 * buffer should be freed by the consumer with a kfree().
@@ -1126,7 +1135,7 @@ EXPORT_SYMBOL_GPL(nvmem_device_cell_read);
 * nvmem_device_cell_write() - Write cell to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @info: nvmem cell info to be written
 * @info: nvmem cell info to be written.
 * @buf: buffer to be written to cell.
 *
 * Return: length of bytes written or negative error code on failure.
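The kernel-doc fixes above document the nvmem consumer API as it now behaves (notably, after this series nvmem_cell_read() tolerates a NULL len). A short consumer sketch using only the calls documented above; "calib" is a made-up cell name:

/*
 * Typical nvmem cell read on the consumer side.
 */
#include <linux/err.h>
#include <linux/nvmem-consumer.h>
#include <linux/slab.h>
#include <linux/string.h>

static int foo_read_calib(struct device *dev, u32 *out)
{
	struct nvmem_cell *cell;
	size_t len;
	void *buf;

	cell = nvmem_cell_get(dev, "calib");
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	buf = nvmem_cell_read(cell, &len);	/* kmalloc'ed, kfree below */
	nvmem_cell_put(cell);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	if (len >= sizeof(*out))
		memcpy(out, buf, sizeof(*out));
	kfree(buf);
	return 0;
}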
@@ -73,6 +73,7 @@ static const struct of_device_id imx_ocotp_dt_ids[] = {
{ .compatible = "fsl,imx6q-ocotp", (void *)128 },
{ .compatible = "fsl,imx6sl-ocotp", (void *)64 },
{ .compatible = "fsl,imx6sx-ocotp", (void *)128 },
{ .compatible = "fsl,imx6ul-ocotp", (void *)128 },
{ },
};
MODULE_DEVICE_TABLE(of, imx_ocotp_dt_ids);
@@ -157,23 +157,26 @@ static int goldfish_new_pdev(void)
static irqreturn_t goldfish_pdev_bus_interrupt(int irq, void *dev_id)
{
irqreturn_t ret = IRQ_NONE;

while (1) {
u32 op = readl(pdev_bus_base + PDEV_BUS_OP);
switch (op) {
case PDEV_BUS_OP_DONE:
return IRQ_NONE;

switch (op) {
case PDEV_BUS_OP_REMOVE_DEV:
goldfish_pdev_remove();
ret = IRQ_HANDLED;
break;

case PDEV_BUS_OP_ADD_DEV:
goldfish_new_pdev();
ret = IRQ_HANDLED;
break;

case PDEV_BUS_OP_DONE:
default:
return ret;
}
ret = IRQ_HANDLED;
}
return ret;
}

static int goldfish_pdev_bus_probe(struct platform_device *pdev)
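The sanitized goldfish handler above drains the op register in a loop and claims the interrupt only if it actually dispatched work, so a spurious interrupt is reported as IRQ_NONE instead of being swallowed. A skeleton of that shape with stand-in register offsets and op codes:

/*
 * Drain-loop IRQ handler pattern from the goldfish fix; the 0x00
 * offset and op values are assumptions, not the device's real layout.
 */
#include <linux/interrupt.h>
#include <linux/io.h>

static irqreturn_t foo_bus_interrupt(void __iomem *base)
{
	irqreturn_t ret = IRQ_NONE;

	while (1) {
		u32 op = readl(base + 0x00);	/* OP register, assumed */

		switch (op) {
		case 1:	/* add device */
		case 2:	/* remove device */
			/* dispatch_op(op) would run here */
			ret = IRQ_HANDLED;
			break;
		default:	/* includes DONE: queue drained */
			return ret;	/* IRQ_NONE if nothing was handled */
		}
	}
}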
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
x
Reference in New Issue
Block a user