Merge 5.12-rc6 into driver-core-next

We need the driver core fixes in here as well.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
commit b20e829390

--- a/.mailmap
+++ b/.mailmap
@@ -36,6 +36,7 @@ Andrew Morton <akpm@linux-foundation.org>
 Andrew Murray <amurray@thegoodpenguin.co.uk> <amurray@embedded-bits.co.uk>
 Andrew Murray <amurray@thegoodpenguin.co.uk> <andrew.murray@arm.com>
 Andrew Vasquez <andrew.vasquez@qlogic.com>
+Andrey Konovalov <andreyknvl@gmail.com> <andreyknvl@google.com>
 Andrey Ryabinin <ryabinin.a.a@gmail.com> <a.ryabinin@samsung.com>
 Andrey Ryabinin <ryabinin.a.a@gmail.com> <aryabinin@virtuozzo.com>
 Andy Adamson <andros@citi.umich.edu>
@@ -65,6 +66,8 @@ Changbin Du <changbin.du@intel.com> <changbin.du@gmail.com>
 Changbin Du <changbin.du@intel.com> <changbin.du@intel.com>
 Chao Yu <chao@kernel.org> <chao2.yu@samsung.com>
 Chao Yu <chao@kernel.org> <yuchao0@huawei.com>
+Chris Chiu <chris.chiu@canonical.com> <chiu@endlessm.com>
+Chris Chiu <chris.chiu@canonical.com> <chiu@endlessos.org>
 Christophe Ricard <christophe.ricard@gmail.com>
 Christoph Hellwig <hch@lst.de>
 Corey Minyard <minyard@acm.org>
@@ -17,12 +17,12 @@ For ACPI on arm64, tables also fall into the following categories:
 
 - Recommended: BERT, EINJ, ERST, HEST, PCCT, SSDT
 
-- Optional: BGRT, CPEP, CSRT, DBG2, DRTM, ECDT, FACS, FPDT, IORT,
-  MCHI, MPST, MSCT, NFIT, PMTT, RASF, SBST, SLIT, SPMI, SRAT, STAO,
-  TCPA, TPM2, UEFI, XENV
+- Optional: BGRT, CPEP, CSRT, DBG2, DRTM, ECDT, FACS, FPDT, IBFT,
+  IORT, MCHI, MPST, MSCT, NFIT, PMTT, RASF, SBST, SLIT, SPMI, SRAT,
+  STAO, TCPA, TPM2, UEFI, XENV
 
-- Not supported: BOOT, DBGP, DMAR, ETDT, HPET, IBFT, IVRS, LPIT,
-  MSDM, OEMx, PSDT, RSDT, SLIC, WAET, WDAT, WDRT, WPBT
+- Not supported: BOOT, DBGP, DMAR, ETDT, HPET, IVRS, LPIT, MSDM, OEMx,
+  PSDT, RSDT, SLIC, WAET, WDAT, WDRT, WPBT
 
 ====== ========================================================================
 Table Usage for ARMv8 Linux
@@ -130,6 +130,9 @@ stable kernels.
 | Marvell        | ARM-MMU-500     | #582743         | N/A                         |
 +----------------+-----------------+-----------------+-----------------------------+
 +----------------+-----------------+-----------------+-----------------------------+
+| NVIDIA         | Carmel Core     | N/A             | NVIDIA_CARMEL_CNP_ERRATUM   |
++----------------+-----------------+-----------------+-----------------------------+
++----------------+-----------------+-----------------+-----------------------------+
 | Freescale/NXP  | LS2080A/LS1043A | A-008585        | FSL_ERRATUM_A008585         |
 +----------------+-----------------+-----------------+-----------------------------+
 +----------------+-----------------+-----------------+-----------------------------+
@@ -267,7 +267,7 @@ DATA PATH
 Tx
 --
 
-end_start_xmit() is called by the stack. This function does the following:
+ena_start_xmit() is called by the stack. This function does the following:
 
 - Maps data buffers (skb->data and frags).
 - Populates ena_buf for the push buffer (if the driver and device are
@@ -52,7 +52,7 @@ purposes as a standard complementary tool. The system's view from
 ``devlink-dpipe`` should change according to the changes done by the
 standard configuration tools.
 
-For example, it’s quiet common to implement Access Control Lists (ACL)
+For example, it’s quite common to implement Access Control Lists (ACL)
 using Ternary Content Addressable Memory (TCAM). The TCAM memory can be
 divided into TCAM regions. Complex TC filters can have multiple rules with
 different priorities and different lookup keys. On the other hand hardware
@@ -151,7 +151,7 @@ representor netdevice.
 -------------
 A subfunction devlink port is created but it is not active yet. That means the
 entities are created on devlink side, the e-switch port representor is created,
-but the subfunction device itself it not created. A user might use e-switch port
+but the subfunction device itself is not created. A user might use e-switch port
 representor to do settings, putting it into bridge, adding TC rules, etc. A user
 might as well configure the hardware address (such as MAC address) of the
 subfunction while subfunction is inactive.
@@ -173,7 +173,7 @@ Terms and Definitions
    * - Term
      - Definitions
    * - ``PCI device``
-     - A physical PCI device having one or more PCI bus consists of one or
+     - A physical PCI device having one or more PCI buses consists of one or
        more PCI controllers.
    * - ``PCI controller``
      - A controller consists of potentially multiple physical functions,
@@ -50,7 +50,7 @@ Callbacks to implement
 
 The NIC driver offering ipsec offload will need to implement these
 callbacks to make the offload available to the network stack's
-XFRM subsytem. Additionally, the feature bits NETIF_F_HW_ESP and
+XFRM subsystem. Additionally, the feature bits NETIF_F_HW_ESP and
 NETIF_F_HW_ESP_TX_CSUM will signal the availability of the offload.
 
 

--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2489,7 +2489,7 @@ N: sc27xx
 N: sc2731
 
 ARM/STI ARCHITECTURE
-M: Patrice Chotard <patrice.chotard@st.com>
+M: Patrice Chotard <patrice.chotard@foss.st.com>
 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S: Maintained
 W: http://www.stlinux.com
@@ -2522,7 +2522,7 @@ F: include/linux/remoteproc/st_slim_rproc.h
 
 ARM/STM32 ARCHITECTURE
 M: Maxime Coquelin <mcoquelin.stm32@gmail.com>
-M: Alexandre Torgue <alexandre.torgue@st.com>
+M: Alexandre Torgue <alexandre.torgue@foss.st.com>
 L: linux-stm32@st-md-mailman.stormreply.com (moderated for non-subscribers)
 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S: Maintained
@@ -3115,7 +3115,7 @@ C: irc://irc.oftc.net/bcache
 F: drivers/md/bcache/
 
 BDISP ST MEDIA DRIVER
-M: Fabien Dessenne <fabien.dessenne@st.com>
+M: Fabien Dessenne <fabien.dessenne@foss.st.com>
 L: linux-media@vger.kernel.org
 S: Supported
 W: https://linuxtv.org
@@ -3675,7 +3675,7 @@ M: bcm-kernel-feedback-list@broadcom.com
 L: linux-pm@vger.kernel.org
 S: Maintained
 T: git git://github.com/broadcom/stblinux.git
-F: drivers/soc/bcm/bcm-pmb.c
+F: drivers/soc/bcm/bcm63xx/bcm-pmb.c
 F: include/dt-bindings/soc/bcm-pmb.h
 
 BROADCOM SPECIFIC AMBA DRIVER (BCMA)
@@ -5080,7 +5080,7 @@ S: Maintained
 F: drivers/platform/x86/dell/dell-wmi.c
 
 DELTA ST MEDIA DRIVER
-M: Hugues Fruchet <hugues.fruchet@st.com>
+M: Hugues Fruchet <hugues.fruchet@foss.st.com>
 L: linux-media@vger.kernel.org
 S: Supported
 W: https://linuxtv.org
@@ -6012,7 +6012,6 @@ F: drivers/gpu/drm/rockchip/
 
 DRM DRIVERS FOR STI
 M: Benjamin Gaignard <benjamin.gaignard@linaro.org>
-M: Vincent Abriou <vincent.abriou@st.com>
 L: dri-devel@lists.freedesktop.org
 S: Maintained
 T: git git://anongit.freedesktop.org/drm/drm-misc
@@ -6020,10 +6019,9 @@ F: Documentation/devicetree/bindings/display/st,stih4xx.txt
 F: drivers/gpu/drm/sti
 
 DRM DRIVERS FOR STM
-M: Yannick Fertre <yannick.fertre@st.com>
-M: Philippe Cornu <philippe.cornu@st.com>
+M: Yannick Fertre <yannick.fertre@foss.st.com>
+M: Philippe Cornu <philippe.cornu@foss.st.com>
 M: Benjamin Gaignard <benjamin.gaignard@linaro.org>
-M: Vincent Abriou <vincent.abriou@st.com>
 L: dri-devel@lists.freedesktop.org
 S: Maintained
 T: git git://anongit.freedesktop.org/drm/drm-misc
@@ -7482,8 +7480,9 @@ F: include/uapi/asm-generic/
 GENERIC PHY FRAMEWORK
 M: Kishon Vijay Abraham I <kishon@ti.com>
 M: Vinod Koul <vkoul@kernel.org>
-L: linux-kernel@vger.kernel.org
+L: linux-phy@lists.infradead.org
 S: Supported
+Q: https://patchwork.kernel.org/project/linux-phy/list/
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/phy/linux-phy.git
 F: Documentation/devicetree/bindings/phy/
 F: drivers/phy/
@@ -8236,7 +8235,7 @@ F: include/linux/hugetlb.h
 F: mm/hugetlb.c
 
 HVA ST MEDIA DRIVER
-M: Jean-Christophe Trotin <jean-christophe.trotin@st.com>
+M: Jean-Christophe Trotin <jean-christophe.trotin@foss.st.com>
 L: linux-media@vger.kernel.org
 S: Supported
 W: https://linuxtv.org
@@ -8526,6 +8525,7 @@ IBM Power SRIOV Virtual NIC Device Driver
 M: Dany Madden <drt@linux.ibm.com>
 M: Lijun Pan <ljp@linux.ibm.com>
 M: Sukadev Bhattiprolu <sukadev@linux.ibm.com>
+R: Thomas Falcon <tlfalcon@linux.ibm.com>
 L: netdev@vger.kernel.org
 S: Supported
 F: drivers/net/ethernet/ibm/ibmvnic.*
@@ -10035,7 +10035,6 @@ F: scripts/leaking_addresses.pl
 
 LED SUBSYSTEM
 M: Pavel Machek <pavel@ucw.cz>
-R: Dan Murphy <dmurphy@ti.com>
 L: linux-leds@vger.kernel.org
 S: Maintained
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/pavel/linux-leds.git
@@ -10911,7 +10910,6 @@ T: git git://linuxtv.org/media_tree.git
 F: drivers/media/radio/radio-maxiradio*
 
 MCAN MMIO DEVICE DRIVER
-M: Dan Murphy <dmurphy@ti.com>
 M: Pankaj Sharma <pankj.sharma@samsung.com>
 L: linux-can@vger.kernel.org
 S: Maintained
@@ -11172,7 +11170,7 @@ T: git git://linuxtv.org/media_tree.git
 F: drivers/media/dvb-frontends/stv6111*
 
 MEDIA DRIVERS FOR STM32 - DCMI
-M: Hugues Fruchet <hugues.fruchet@st.com>
+M: Hugues Fruchet <hugues.fruchet@foss.st.com>
 L: linux-media@vger.kernel.org
 S: Supported
 T: git git://linuxtv.org/media_tree.git
@@ -12543,7 +12541,7 @@ NETWORKING [MPTCP]
 M: Mat Martineau <mathew.j.martineau@linux.intel.com>
 M: Matthieu Baerts <matthieu.baerts@tessares.net>
 L: netdev@vger.kernel.org
-L: mptcp@lists.01.org
+L: mptcp@lists.linux.dev
 S: Maintained
 W: https://github.com/multipath-tcp/mptcp_net-next/wiki
 B: https://github.com/multipath-tcp/mptcp_net-next/issues
@@ -14714,15 +14712,11 @@ F: drivers/net/ethernet/qlogic/qlcnic/
 QLOGIC QLGE 10Gb ETHERNET DRIVER
 M: Manish Chopra <manishc@marvell.com>
 M: GR-Linux-NIC-Dev@marvell.com
-L: netdev@vger.kernel.org
-S: Supported
-F: drivers/staging/qlge/
-
-QLOGIC QLGE 10Gb ETHERNET DRIVER
 M: Coiby Xu <coiby.xu@gmail.com>
 L: netdev@vger.kernel.org
-S: Maintained
+S: Supported
 F: Documentation/networking/device_drivers/qlogic/qlge.rst
+F: drivers/staging/qlge/
 
 QM1D1B0004 MEDIA DRIVER
 M: Akihiro Tsukada <tskd08@gmail.com>
@@ -15640,8 +15634,8 @@ F: Documentation/s390/pci.rst
 
 S390 VFIO AP DRIVER
 M: Tony Krowiak <akrowiak@linux.ibm.com>
-M: Pierre Morel <pmorel@linux.ibm.com>
 M: Halil Pasic <pasic@linux.ibm.com>
+M: Jason Herne <jjherne@linux.ibm.com>
 L: linux-s390@vger.kernel.org
 S: Supported
 W: http://www.ibm.com/developerworks/linux/linux390/
@@ -15653,6 +15647,7 @@ F: drivers/s390/crypto/vfio_ap_private.h
 S390 VFIO-CCW DRIVER
 M: Cornelia Huck <cohuck@redhat.com>
 M: Eric Farman <farman@linux.ibm.com>
+M: Matthew Rosato <mjrosato@linux.ibm.com>
 R: Halil Pasic <pasic@linux.ibm.com>
 L: linux-s390@vger.kernel.org
 L: kvm@vger.kernel.org
@@ -15663,6 +15658,7 @@ F: include/uapi/linux/vfio_ccw.h
 
 S390 VFIO-PCI DRIVER
 M: Matthew Rosato <mjrosato@linux.ibm.com>
+M: Eric Farman <farman@linux.ibm.com>
 L: linux-s390@vger.kernel.org
 L: kvm@vger.kernel.org
 S: Supported
@@ -16892,8 +16888,10 @@ F: tools/spi/
 
 SPIDERNET NETWORK DRIVER for CELL
 M: Ishizaki Kou <kou.ishizaki@toshiba.co.jp>
+M: Geoff Levand <geoff@infradead.org>
 L: netdev@vger.kernel.org
-S: Supported
+L: linuxppc-dev@lists.ozlabs.org
+S: Maintained
 F: Documentation/networking/device_drivers/ethernet/toshiba/spider_net.rst
 F: drivers/net/ethernet/toshiba/spider_net*
 
@@ -16947,7 +16945,8 @@ F: Documentation/devicetree/bindings/media/i2c/st,st-mipid02.txt
 F: drivers/media/i2c/st-mipid02.c
 
 ST STM32 I2C/SMBUS DRIVER
-M: Pierre-Yves MORDRET <pierre-yves.mordret@st.com>
+M: Pierre-Yves MORDRET <pierre-yves.mordret@foss.st.com>
+M: Alain Volmat <alain.volmat@foss.st.com>
 L: linux-i2c@vger.kernel.org
 S: Maintained
 F: drivers/i2c/busses/i2c-stm32*
@@ -17072,7 +17071,7 @@ F: kernel/jump_label.c
 F: kernel/static_call.c
 
 STI AUDIO (ASoC) DRIVERS
-M: Arnaud Pouliquen <arnaud.pouliquen@st.com>
+M: Arnaud Pouliquen <arnaud.pouliquen@foss.st.com>
 L: alsa-devel@alsa-project.org (moderated for non-subscribers)
 S: Maintained
 F: Documentation/devicetree/bindings/sound/st,sti-asoc-card.txt
@@ -17092,15 +17091,15 @@ T: git git://linuxtv.org/media_tree.git
 F: drivers/media/usb/stk1160/
 
 STM32 AUDIO (ASoC) DRIVERS
-M: Olivier Moysan <olivier.moysan@st.com>
-M: Arnaud Pouliquen <arnaud.pouliquen@st.com>
+M: Olivier Moysan <olivier.moysan@foss.st.com>
+M: Arnaud Pouliquen <arnaud.pouliquen@foss.st.com>
 L: alsa-devel@alsa-project.org (moderated for non-subscribers)
 S: Maintained
 F: Documentation/devicetree/bindings/iio/adc/st,stm32-*.yaml
 F: sound/soc/stm/
 
 STM32 TIMER/LPTIMER DRIVERS
-M: Fabrice Gasnier <fabrice.gasnier@st.com>
+M: Fabrice Gasnier <fabrice.gasnier@foss.st.com>
 S: Maintained
 F: Documentation/ABI/testing/*timer-stm32
 F: Documentation/devicetree/bindings/*/*stm32-*timer*
@@ -17110,7 +17109,7 @@ F: include/linux/*/stm32-*tim*
 
 STMMAC ETHERNET DRIVER
 M: Giuseppe Cavallaro <peppe.cavallaro@st.com>
-M: Alexandre Torgue <alexandre.torgue@st.com>
+M: Alexandre Torgue <alexandre.torgue@foss.st.com>
 M: Jose Abreu <joabreu@synopsys.com>
 L: netdev@vger.kernel.org
 S: Supported
@@ -17852,7 +17851,6 @@ S: Maintained
 F: drivers/thermal/ti-soc-thermal/
 
 TI BQ27XXX POWER SUPPLY DRIVER
-R: Dan Murphy <dmurphy@ti.com>
 F: drivers/power/supply/bq27xxx_battery.c
 F: drivers/power/supply/bq27xxx_battery_i2c.c
 F: include/linux/power/bq27xxx_battery.h
@@ -17987,7 +17985,6 @@ S: Odd Fixes
 F: sound/soc/codecs/tas571x*
 
 TI TCAN4X5X DEVICE DRIVER
-M: Dan Murphy <dmurphy@ti.com>
 L: linux-can@vger.kernel.org
 S: Maintained
 F: Documentation/devicetree/bindings/net/can/tcan4x5x.txt

--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 12
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc6
 NAME = Frozen Wasteland
 
 # *DOCUMENTATION*
@@ -40,6 +40,9 @@
 		ethernet1 = &cpsw_emac1;
 		spi0 = &spi0;
 		spi1 = &spi1;
+		mmc0 = &mmc1;
+		mmc1 = &mmc2;
+		mmc2 = &mmc3;
 	};
 
 	cpus {
@@ -334,14 +334,6 @@
 };
 
 &pinctrl {
-	atmel,mux-mask = <
-		 /* A          B          C        */
-		 0xFFFFFE7F 0xC0E0397F 0xEF00019D  /* pioA */
-		 0x03FFFFFF 0x02FC7E68 0x00780000  /* pioB */
-		 0xffffffff 0xF83FFFFF 0xB800F3FC  /* pioC */
-		 0x003FFFFF 0x003F8000 0x00000000  /* pioD */
-		 >;
-
 	adc {
 		pinctrl_adc_default: adc_default {
 			atmel,pins = <AT91_PIOB 15 AT91_PERIPH_A AT91_PINCTRL_NONE>;
@@ -84,8 +84,8 @@
 	pinctrl-0 = <&pinctrl_macb0_default>;
 	phy-mode = "rmii";
 
-	ethernet-phy@0 {
-		reg = <0x0>;
+	ethernet-phy@7 {
+		reg = <0x7>;
 		interrupt-parent = <&pioA>;
 		interrupts = <PIN_PD31 IRQ_TYPE_LEVEL_LOW>;
 		pinctrl-names = "default";
@@ -210,9 +210,6 @@
 			micrel,led-mode = <1>;
 			clocks = <&clks IMX6UL_CLK_ENET_REF>;
 			clock-names = "rmii-ref";
-			reset-gpios = <&gpio_spi 1 GPIO_ACTIVE_LOW>;
-			reset-assert-us = <10000>;
-			reset-deassert-us = <100>;
 
 		};
 
@@ -222,9 +219,6 @@
 			micrel,led-mode = <1>;
 			clocks = <&clks IMX6UL_CLK_ENET2_REF>;
 			clock-names = "rmii-ref";
-			reset-gpios = <&gpio_spi 2 GPIO_ACTIVE_LOW>;
-			reset-assert-us = <10000>;
-			reset-deassert-us = <100>;
 		};
 	};
 };
@@ -243,6 +237,22 @@
 	status = "okay";
 };
 
+&gpio_spi {
+	eth0-phy-hog {
+		gpio-hog;
+		gpios = <1 GPIO_ACTIVE_HIGH>;
+		output-high;
+		line-name = "eth0-phy";
+	};
+
+	eth1-phy-hog {
+		gpio-hog;
+		gpios = <2 GPIO_ACTIVE_HIGH>;
+		output-high;
+		line-name = "eth1-phy";
+	};
+};
+
 &i2c1 {
 	clock-frequency = <100000>;
 	pinctrl-names = "default";
@@ -14,5 +14,6 @@
 };
 
 &gpmi {
+	fsl,use-minimum-ecc;
 	status = "okay";
 };
@@ -606,6 +606,15 @@
 			compatible = "microchip,sam9x60-pinctrl", "atmel,at91sam9x5-pinctrl", "atmel,at91rm9200-pinctrl", "simple-bus";
 			ranges = <0xfffff400 0xfffff400 0x800>;
 
+			/* mux-mask corresponding to sam9x60 SoC in TFBGA228L package */
+			atmel,mux-mask = <
+				 /* A          B          C        */
+				 0xffffffff 0xffe03fff 0xef00019d  /* pioA */
+				 0x03ffffff 0x02fc7e7f 0x00780000  /* pioB */
+				 0xffffffff 0xffffffff 0xf83fffff  /* pioC */
+				 0x003fffff 0x003f8000 0x00000000  /* pioD */
+				 >;
+
 			pioA: gpio@fffff400 {
 				compatible = "microchip,sam9x60-gpio", "atmel,at91sam9x5-gpio", "atmel,at91rm9200-gpio";
 				reg = <0xfffff400 0x200>;
@@ -7,6 +7,7 @@
 #include <linux/module.h>
 #include <linux/irq.h>
 #include <linux/irqdomain.h>
+#include <linux/irqchip.h>
 #include <linux/io.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
@@ -162,7 +163,7 @@ static void __exception_irq_entry avic_handle_irq(struct pt_regs *regs)
  * interrupts. It registers the interrupt enable and disable functions
  * to the kernel for each interrupt source.
  */
-void __init mxc_init_irq(void __iomem *irqbase)
+static void __init mxc_init_irq(void __iomem *irqbase)
 {
 	struct device_node *np;
 	int irq_base;
@@ -220,3 +221,16 @@ void __init mxc_init_irq(void __iomem *irqbase)
 
 	printk(KERN_INFO "MXC IRQ initialized\n");
 }
+
+static int __init imx_avic_init(struct device_node *node,
+				struct device_node *parent)
+{
+	void __iomem *avic_base;
+
+	avic_base = of_iomap(node, 0);
+	BUG_ON(!avic_base);
+	mxc_init_irq(avic_base);
+	return 0;
+}
+
+IRQCHIP_DECLARE(imx_avic, "fsl,avic", imx_avic_init);
@@ -22,7 +22,6 @@ void mx35_map_io(void);
 void imx21_init_early(void);
 void imx31_init_early(void);
 void imx35_init_early(void);
-void mxc_init_irq(void __iomem *);
 void mx31_init_irq(void);
 void mx35_init_irq(void);
 void mxc_set_cpu_type(unsigned int type);
@@ -17,16 +17,6 @@ static void __init imx1_init_early(void)
 	mxc_set_cpu_type(MXC_CPU_MX1);
 }
 
-static void __init imx1_init_irq(void)
-{
-	void __iomem *avic_addr;
-
-	avic_addr = ioremap(MX1_AVIC_ADDR, SZ_4K);
-	WARN_ON(!avic_addr);
-
-	mxc_init_irq(avic_addr);
-}
-
 static const char * const imx1_dt_board_compat[] __initconst = {
 	"fsl,imx1",
 	NULL
@@ -34,7 +24,6 @@ static const char * const imx1_dt_board_compat[] __initconst = {
 
 DT_MACHINE_START(IMX1_DT, "Freescale i.MX1 (Device Tree Support)")
 	.init_early = imx1_init_early,
-	.init_irq = imx1_init_irq,
 	.dt_compat = imx1_dt_board_compat,
 	.restart = mxc_restart,
 MACHINE_END
@@ -22,17 +22,6 @@ static void __init imx25_dt_init(void)
 	imx_aips_allow_unprivileged_access("fsl,imx25-aips");
 }
 
-static void __init mx25_init_irq(void)
-{
-	struct device_node *np;
-	void __iomem *avic_base;
-
-	np = of_find_compatible_node(NULL, NULL, "fsl,avic");
-	avic_base = of_iomap(np, 0);
-	BUG_ON(!avic_base);
-	mxc_init_irq(avic_base);
-}
-
 static const char * const imx25_dt_board_compat[] __initconst = {
 	"fsl,imx25",
 	NULL
@@ -42,6 +31,5 @@ DT_MACHINE_START(IMX25_DT, "Freescale i.MX25 (Device Tree Support)")
 	.init_early = imx25_init_early,
 	.init_machine = imx25_dt_init,
 	.init_late = imx25_pm_init,
-	.init_irq = mx25_init_irq,
 	.dt_compat = imx25_dt_board_compat,
 MACHINE_END
@@ -56,17 +56,6 @@ static void __init imx27_init_early(void)
 	mxc_set_cpu_type(MXC_CPU_MX27);
 }
 
-static void __init mx27_init_irq(void)
-{
-	void __iomem *avic_base;
-	struct device_node *np;
-
-	np = of_find_compatible_node(NULL, NULL, "fsl,avic");
-	avic_base = of_iomap(np, 0);
-	BUG_ON(!avic_base);
-	mxc_init_irq(avic_base);
-}
-
 static const char * const imx27_dt_board_compat[] __initconst = {
 	"fsl,imx27",
 	NULL
@@ -75,7 +64,6 @@ static const char * const imx27_dt_board_compat[] __initconst = {
 DT_MACHINE_START(IMX27_DT, "Freescale i.MX27 (Device Tree Support)")
 	.map_io = mx27_map_io,
 	.init_early = imx27_init_early,
-	.init_irq = mx27_init_irq,
 	.init_late = imx27_pm_init,
 	.dt_compat = imx27_dt_board_compat,
 MACHINE_END
@@ -14,6 +14,5 @@ static const char * const imx31_dt_board_compat[] __initconst = {
 DT_MACHINE_START(IMX31_DT, "Freescale i.MX31 (Device Tree Support)")
 	.map_io = mx31_map_io,
 	.init_early = imx31_init_early,
-	.init_irq = mx31_init_irq,
 	.dt_compat = imx31_dt_board_compat,
 MACHINE_END
@@ -27,6 +27,5 @@ DT_MACHINE_START(IMX35_DT, "Freescale i.MX35 (Device Tree Support)")
 	.l2c_aux_mask = ~0,
 	.map_io = mx35_map_io,
 	.init_early = imx35_init_early,
-	.init_irq = mx35_init_irq,
 	.dt_compat = imx35_dt_board_compat,
 MACHINE_END
@@ -109,18 +109,6 @@ void __init imx31_init_early(void)
 	mx3_ccm_base = of_iomap(np, 0);
 	BUG_ON(!mx3_ccm_base);
 }
-
-void __init mx31_init_irq(void)
-{
-	void __iomem *avic_base;
-	struct device_node *np;
-
-	np = of_find_compatible_node(NULL, NULL, "fsl,imx31-avic");
-	avic_base = of_iomap(np, 0);
-	BUG_ON(!avic_base);
-
-	mxc_init_irq(avic_base);
-}
 #endif /* ifdef CONFIG_SOC_IMX31 */
 
 #ifdef CONFIG_SOC_IMX35
@@ -158,16 +146,4 @@ void __init imx35_init_early(void)
 	mx3_ccm_base = of_iomap(np, 0);
 	BUG_ON(!mx3_ccm_base);
 }
-
-void __init mx35_init_irq(void)
-{
-	void __iomem *avic_base;
-	struct device_node *np;
-
-	np = of_find_compatible_node(NULL, NULL, "fsl,imx35-avic");
-	avic_base = of_iomap(np, 0);
-	BUG_ON(!avic_base);
-
-	mxc_init_irq(avic_base);
-}
 #endif /* ifdef CONFIG_SOC_IMX35 */
@@ -88,34 +88,26 @@ static void __init sr_set_nvalues(struct omap_volt_data *volt_data,
 
 extern struct omap_sr_data omap_sr_pdata[];
 
-static int __init sr_dev_init(struct omap_hwmod *oh, void *user)
+static int __init sr_init_by_name(const char *name, const char *voltdm)
 {
 	struct omap_sr_data *sr_data = NULL;
 	struct omap_volt_data *volt_data;
-	struct omap_smartreflex_dev_attr *sr_dev_attr;
 	static int i;
 
-	if (!strncmp(oh->name, "smartreflex_mpu_iva", 20) ||
-	    !strncmp(oh->name, "smartreflex_mpu", 16))
+	if (!strncmp(name, "smartreflex_mpu_iva", 20) ||
+	    !strncmp(name, "smartreflex_mpu", 16))
 		sr_data = &omap_sr_pdata[OMAP_SR_MPU];
-	else if (!strncmp(oh->name, "smartreflex_core", 17))
+	else if (!strncmp(name, "smartreflex_core", 17))
 		sr_data = &omap_sr_pdata[OMAP_SR_CORE];
-	else if (!strncmp(oh->name, "smartreflex_iva", 16))
+	else if (!strncmp(name, "smartreflex_iva", 16))
 		sr_data = &omap_sr_pdata[OMAP_SR_IVA];
 
 	if (!sr_data) {
-		pr_err("%s: Unknown instance %s\n", __func__, oh->name);
+		pr_err("%s: Unknown instance %s\n", __func__, name);
 		return -EINVAL;
 	}
 
-	sr_dev_attr = (struct omap_smartreflex_dev_attr *)oh->dev_attr;
-	if (!sr_dev_attr || !sr_dev_attr->sensor_voltdm_name) {
-		pr_err("%s: No voltage domain specified for %s. Cannot initialize\n",
-		       __func__, oh->name);
-		goto exit;
-	}
-
-	sr_data->name = oh->name;
+	sr_data->name = name;
 	if (cpu_is_omap343x())
 		sr_data->ip_type = 1;
 	else
@@ -136,10 +128,10 @@ static int __init sr_dev_init(struct omap_hwmod *oh, void *user)
 		}
 	}
 
-	sr_data->voltdm = voltdm_lookup(sr_dev_attr->sensor_voltdm_name);
+	sr_data->voltdm = voltdm_lookup(voltdm);
 	if (!sr_data->voltdm) {
 		pr_err("%s: Unable to get voltage domain pointer for VDD %s\n",
-		       __func__, sr_dev_attr->sensor_voltdm_name);
+		       __func__, voltdm);
 		goto exit;
 	}
 
@@ -160,6 +152,20 @@ exit:
 	return 0;
 }
 
+static int __init sr_dev_init(struct omap_hwmod *oh, void *user)
+{
+	struct omap_smartreflex_dev_attr *sr_dev_attr;
+
+	sr_dev_attr = (struct omap_smartreflex_dev_attr *)oh->dev_attr;
+	if (!sr_dev_attr || !sr_dev_attr->sensor_voltdm_name) {
+		pr_err("%s: No voltage domain specified for %s. Cannot initialize\n",
+		       __func__, oh->name);
+		return 0;
+	}
+
+	return sr_init_by_name(oh->name, sr_dev_attr->sensor_voltdm_name);
+}
+
 /*
  * API to be called from board files to enable smartreflex
  * autocompensation at init.
@@ -169,7 +175,42 @@ void __init omap_enable_smartreflex_on_init(void)
 	sr_enable_on_init = true;
 }
 
+static const char * const omap4_sr_instances[] = {
+	"mpu",
+	"iva",
+	"core",
+};
+
+static const char * const dra7_sr_instances[] = {
+	"mpu",
+	"core",
+};
+
 int __init omap_devinit_smartreflex(void)
 {
+	const char * const *sr_inst;
+	int i, nr_sr = 0;
+
+	if (soc_is_omap44xx()) {
+		sr_inst = omap4_sr_instances;
+		nr_sr = ARRAY_SIZE(omap4_sr_instances);
+
+	} else if (soc_is_dra7xx()) {
+		sr_inst = dra7_sr_instances;
+		nr_sr = ARRAY_SIZE(dra7_sr_instances);
+	}
+
+	if (nr_sr) {
+		const char *name, *voltdm;
+
+		for (i = 0; i < nr_sr; i++) {
+			name = kasprintf(GFP_KERNEL, "smartreflex_%s", sr_inst[i]);
+			voltdm = sr_inst[i];
+			sr_init_by_name(name, voltdm);
+		}
+
+		return 0;
+	}
+
 	return omap_hwmod_for_each_by_class("smartreflex", sr_dev_init, NULL);
 }
@@ -810,6 +810,16 @@ config QCOM_FALKOR_ERRATUM_E1041
 
 	  If unsure, say Y.
 
+config NVIDIA_CARMEL_CNP_ERRATUM
+	bool "NVIDIA Carmel CNP: CNP on Carmel semantically different than ARM cores"
+	default y
+	help
+	  If CNP is enabled on Carmel cores, non-sharable TLBIs on a core will not
+	  invalidate shared TLB entries installed by a different core, as it would
+	  on standard ARM cores.
+
+	  If unsure, say Y.
+
 config SOCIONEXT_SYNQUACER_PREITS
 	bool "Socionext Synquacer: Workaround for GICv3 pre-ITS"
 	default y
@@ -198,6 +198,7 @@
 			ranges = <0x0 0x00 0x1700000 0x100000>;
 			reg = <0x00 0x1700000 0x0 0x100000>;
 			interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>;
+			dma-coherent;
 
 			sec_jr0: jr@10000 {
 				compatible = "fsl,sec-v5.4-job-ring",
@@ -348,6 +348,7 @@
 			ranges = <0x0 0x00 0x1700000 0x100000>;
 			reg = <0x00 0x1700000 0x0 0x100000>;
 			interrupts = <0 75 0x4>;
+			dma-coherent;
 
 			sec_jr0: jr@10000 {
 				compatible = "fsl,sec-v5.4-job-ring",
@@ -354,6 +354,7 @@
 			ranges = <0x0 0x00 0x1700000 0x100000>;
 			reg = <0x00 0x1700000 0x0 0x100000>;
 			interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>;
+			dma-coherent;
 
 			sec_jr0: jr@10000 {
 				compatible = "fsl,sec-v5.4-job-ring",
@@ -35,7 +35,7 @@
 
 &i2c2 {
 	clock-frequency = <400000>;
-	pinctrl-names = "default";
+	pinctrl-names = "default", "gpio";
 	pinctrl-0 = <&pinctrl_i2c2>;
 	pinctrl-1 = <&pinctrl_i2c2_gpio>;
 	sda-gpios = <&gpio5 17 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
@@ -67,7 +67,7 @@
 
 &i2c1 {
 	clock-frequency = <400000>;
-	pinctrl-names = "default";
+	pinctrl-names = "default", "gpio";
 	pinctrl-0 = <&pinctrl_i2c1>;
 	pinctrl-1 = <&pinctrl_i2c1_gpio>;
 	sda-gpios = <&gpio5 15 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>;
@@ -37,7 +37,7 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
 	} while (--n > 0);
 
 	sum += ((sum >> 32) | (sum << 32));
-	return csum_fold((__force u32)(sum >> 32));
+	return csum_fold((__force __wsum)(sum >> 32));
 }
 #define ip_fast_csum ip_fast_csum
 
@@ -66,7 +66,8 @@
 #define ARM64_WORKAROUND_1508412 58
 #define ARM64_HAS_LDAPR 59
 #define ARM64_KVM_PROTECTED_MODE 60
+#define ARM64_WORKAROUND_NVIDIA_CARMEL_CNP 61
 
-#define ARM64_NCAPS 61
+#define ARM64_NCAPS 62
 
 #endif /* __ASM_CPUCAPS_H */
@@ -278,6 +278,7 @@
 #define CPTR_EL2_DEFAULT CPTR_EL2_RES1
 
 /* Hyp Debug Configuration Register bits */
+#define MDCR_EL2_TTRF (1 << 19)
 #define MDCR_EL2_TPMS (1 << 14)
 #define MDCR_EL2_E2PB_MASK (UL(0x3))
 #define MDCR_EL2_E2PB_SHIFT (UL(12))
@@ -251,6 +251,8 @@ unsigned long get_wchan(struct task_struct *p);
 extern struct task_struct *cpu_switch_to(struct task_struct *prev,
 					 struct task_struct *next);
 
+asmlinkage void arm64_preempt_schedule_irq(void);
+
 #define task_pt_regs(p) \
 	((struct pt_regs *)(THREAD_SIZE + task_stack_page(p)) - 1)
 
@@ -55,6 +55,8 @@ void arch_setup_new_exec(void);
 #define arch_setup_new_exec arch_setup_new_exec
 
 void arch_release_task_struct(struct task_struct *tsk);
+int arch_dup_task_struct(struct task_struct *dst,
+			 struct task_struct *src);
 
 #endif
 
@@ -525,6 +525,14 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 				  0, 0,
 				  1, 0),
 	},
+#endif
+#ifdef CONFIG_NVIDIA_CARMEL_CNP_ERRATUM
+	{
+		/* NVIDIA Carmel */
+		.desc = "NVIDIA Carmel CNP erratum",
+		.capability = ARM64_WORKAROUND_NVIDIA_CARMEL_CNP,
+		ERRATA_MIDR_ALL_VERSIONS(MIDR_NVIDIA_CARMEL),
+	},
 #endif
 	{
 	}
@@ -383,7 +383,6 @@ static const struct arm64_ftr_bits ftr_id_aa64dfr0[] = {
 	 * of support.
 	 */
 	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64DFR0_PMUVER_SHIFT, 4, 0),
-	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64DFR0_TRACEVER_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64DFR0_DEBUGVER_SHIFT, 4, 0x6),
 	ARM64_FTR_END,
 };
@@ -1321,7 +1320,10 @@ has_useable_cnp(const struct arm64_cpu_capabilities *entry, int scope)
 	 * may share TLB entries with a CPU stuck in the crashed
 	 * kernel.
 	 */
 	if (is_kdump_kernel())
+		return false;
+
+	if (cpus_have_const_cap(ARM64_WORKAROUND_NVIDIA_CARMEL_CNP))
 		return false;
 
 	return has_cpuid_feature(entry, scope);
@@ -353,7 +353,7 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
 	 * with the CLIDR_EL1 fields to avoid triggering false warnings
 	 * when there is a mismatch across the CPUs. Keep track of the
 	 * effective value of the CTR_EL0 in our internal records for
-	 * acurate sanity check and feature enablement.
+	 * accurate sanity check and feature enablement.
 	 */
 	info->reg_ctr = read_cpuid_effective_cachetype();
 	info->reg_dczid = read_cpuid(DCZID_EL0);
@@ -64,5 +64,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
 ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos)
 {
 	memcpy(buf, phys_to_virt((phys_addr_t)*ppos), count);
+	*ppos += count;
+
 	return count;
 }
@@ -57,6 +57,8 @@
 #include <asm/processor.h>
 #include <asm/pointer_auth.h>
 #include <asm/stacktrace.h>
+#include <asm/switch_to.h>
+#include <asm/system_misc.h>
 
 #if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
 #include <linux/stackprotector.h>
@@ -194,8 +194,9 @@ void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
 
 #ifdef CONFIG_STACKTRACE
 
-void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
-		     struct task_struct *task, struct pt_regs *regs)
+noinline void arch_stack_walk(stack_trace_consume_fn consume_entry,
+			      void *cookie, struct task_struct *task,
+			      struct pt_regs *regs)
 {
 	struct stackframe frame;
 
@@ -203,8 +204,8 @@ void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
 		start_backtrace(&frame, regs->regs[29], regs->pc);
 	else if (task == current)
 		start_backtrace(&frame,
-				(unsigned long)__builtin_frame_address(0),
-				(unsigned long)arch_stack_walk);
+				(unsigned long)__builtin_frame_address(1),
+				(unsigned long)__builtin_return_address(0));
 	else
 		start_backtrace(&frame, thread_saved_fp(task),
 				thread_saved_pc(task));
@@ -89,6 +89,7 @@ void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu)
  *  - Debug ROM Address (MDCR_EL2_TDRA)
  *  - OS related registers (MDCR_EL2_TDOSA)
  *  - Statistical profiler (MDCR_EL2_TPMS/MDCR_EL2_E2PB)
+ *  - Self-hosted Trace Filter controls (MDCR_EL2_TTRF)
  *
  * Additionally, KVM only traps guest accesses to the debug registers if
  * the guest is not actively using them (see the KVM_ARM64_DEBUG_DIRTY
@@ -112,6 +113,7 @@ void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
 	vcpu->arch.mdcr_el2 = __this_cpu_read(mdcr_el2) & MDCR_EL2_HPMN_MASK;
 	vcpu->arch.mdcr_el2 |= (MDCR_EL2_TPM |
 				MDCR_EL2_TPMS |
+				MDCR_EL2_TTRF |
 				MDCR_EL2_TPMCR |
 				MDCR_EL2_TDRA |
 				MDCR_EL2_TDOSA);
@@ -429,6 +429,13 @@ u64 __vgic_v3_get_gic_config(void)
 	if (has_vhe())
 		flags = local_daif_save();
 
+	/*
+	 * Table 11-2 "Permitted ICC_SRE_ELx.SRE settings" indicates
+	 * that to be able to set ICC_SRE_EL1.SRE to 0, all the
+	 * interrupt overrides must be set. You've got to love this.
+	 */
+	sysreg_clear_set(hcr_el2, 0, HCR_AMO | HCR_FMO | HCR_IMO);
+	isb();
 	write_gicreg(0, ICC_SRE_EL1);
 	isb();
 
@@ -436,6 +443,8 @@ u64 __vgic_v3_get_gic_config(void)
 
 	write_gicreg(sre, ICC_SRE_EL1);
 	isb();
+	sysreg_clear_set(hcr_el2, HCR_AMO | HCR_FMO | HCR_IMO, 0);
+	isb();
 
 	if (has_vhe())
 		local_daif_restore(flags);
@@ -1448,6 +1448,22 @@ static void __remove_pgd_mapping(pgd_t *pgdir, unsigned long start, u64 size)
 struct range arch_get_mappable_range(void)
 {
 	struct range mhp_range;
+	u64 start_linear_pa = __pa(_PAGE_OFFSET(vabits_actual));
+	u64 end_linear_pa = __pa(PAGE_END - 1);
+
+	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
+		/*
+		 * Check for a wrap, it is possible because of randomized linear
+		 * mapping the start physical address is actually bigger than
+		 * the end physical address. In this case set start to zero
+		 * because [0, end_linear_pa] range must still be able to cover
+		 * all addressable physical addresses.
+		 */
+		if (start_linear_pa > end_linear_pa)
+			start_linear_pa = 0;
+	}
+
+	WARN_ON(start_linear_pa > end_linear_pa);
 
 	/*
 	 * Linear mapping region is the range [PAGE_OFFSET..(PAGE_END - 1)]
@@ -1455,8 +1471,9 @@ struct range arch_get_mappable_range(void)
 	 * range which can be mapped inside this linear mapping range, must
 	 * also be derived from its end points.
 	 */
-	mhp_range.start = __pa(_PAGE_OFFSET(vabits_actual));
-	mhp_range.end = __pa(PAGE_END - 1);
+	mhp_range.start = start_linear_pa;
+	mhp_range.end = end_linear_pa;
 
 	return mhp_range;
 }
+
@@ -59,7 +59,7 @@ show_##name(struct device *dev, struct device_attribute *attr, \
 		char *buf) \
 { \
 	u32 cpu=dev->id; \
-	return sprintf(buf, "%lx\n", name[cpu]); \
+	return sprintf(buf, "%llx\n", name[cpu]); \
 }
 
 #define store(name) \
@@ -86,9 +86,9 @@ store_call_start(struct device *dev, struct device_attribute *attr,
 
 #ifdef ERR_INJ_DEBUG
 	printk(KERN_DEBUG "pal_mc_err_inject for cpu%d:\n", cpu);
-	printk(KERN_DEBUG "err_type_info=%lx,\n", err_type_info[cpu]);
-	printk(KERN_DEBUG "err_struct_info=%lx,\n", err_struct_info[cpu]);
-	printk(KERN_DEBUG "err_data_buffer=%lx, %lx, %lx.\n",
+	printk(KERN_DEBUG "err_type_info=%llx,\n", err_type_info[cpu]);
+	printk(KERN_DEBUG "err_struct_info=%llx,\n", err_struct_info[cpu]);
+	printk(KERN_DEBUG "err_data_buffer=%llx, %llx, %llx.\n",
 	       err_data_buffer[cpu].data1,
 	       err_data_buffer[cpu].data2,
 	       err_data_buffer[cpu].data3);
@@ -117,8 +117,8 @@ store_call_start(struct device *dev, struct device_attribute *attr,
 
 #ifdef ERR_INJ_DEBUG
 	printk(KERN_DEBUG "Returns: status=%d,\n", (int)status[cpu]);
-	printk(KERN_DEBUG "capabilities=%lx,\n", capabilities[cpu]);
-	printk(KERN_DEBUG "resources=%lx\n", resources[cpu]);
+	printk(KERN_DEBUG "capabilities=%llx,\n", capabilities[cpu]);
+	printk(KERN_DEBUG "resources=%llx\n", resources[cpu]);
 #endif
 	return size;
 }
@ -131,7 +131,7 @@ show_virtual_to_phys(struct device *dev, struct device_attribute *attr,
|
|||||||
char *buf)
|
char *buf)
|
||||||
{
|
{
|
||||||
unsigned int cpu=dev->id;
|
unsigned int cpu=dev->id;
|
||||||
return sprintf(buf, "%lx\n", phys_addr[cpu]);
|
return sprintf(buf, "%llx\n", phys_addr[cpu]);
|
||||||
}
|
}
|
||||||
|
|
||||||
static ssize_t
|
static ssize_t
|
||||||
@ -145,7 +145,7 @@ store_virtual_to_phys(struct device *dev, struct device_attribute *attr,
|
|||||||
ret = get_user_pages_fast(virt_addr, 1, FOLL_WRITE, NULL);
|
ret = get_user_pages_fast(virt_addr, 1, FOLL_WRITE, NULL);
|
||||||
if (ret<=0) {
|
if (ret<=0) {
|
||||||
#ifdef ERR_INJ_DEBUG
|
#ifdef ERR_INJ_DEBUG
|
||||||
printk("Virtual address %lx is not existing.\n",virt_addr);
|
printk("Virtual address %llx is not existing.\n", virt_addr);
|
||||||
#endif
|
#endif
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
}
|
}
|
||||||
@ -163,7 +163,7 @@ show_err_data_buffer(struct device *dev,
|
|||||||
{
|
{
|
||||||
unsigned int cpu=dev->id;
|
unsigned int cpu=dev->id;
|
||||||
|
|
||||||
return sprintf(buf, "%lx, %lx, %lx\n",
|
return sprintf(buf, "%llx, %llx, %llx\n",
|
||||||
err_data_buffer[cpu].data1,
|
err_data_buffer[cpu].data1,
|
||||||
err_data_buffer[cpu].data2,
|
err_data_buffer[cpu].data2,
|
||||||
err_data_buffer[cpu].data3);
|
err_data_buffer[cpu].data3);
|
||||||
@ -178,13 +178,13 @@ store_err_data_buffer(struct device *dev,
|
|||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
#ifdef ERR_INJ_DEBUG
|
#ifdef ERR_INJ_DEBUG
|
||||||
printk("write err_data_buffer=[%lx,%lx,%lx] on cpu%d\n",
|
printk("write err_data_buffer=[%llx,%llx,%llx] on cpu%d\n",
|
||||||
err_data_buffer[cpu].data1,
|
err_data_buffer[cpu].data1,
|
||||||
err_data_buffer[cpu].data2,
|
err_data_buffer[cpu].data2,
|
||||||
err_data_buffer[cpu].data3,
|
err_data_buffer[cpu].data3,
|
||||||
cpu);
|
cpu);
|
||||||
#endif
|
#endif
|
||||||
ret=sscanf(buf, "%lx, %lx, %lx",
|
ret = sscanf(buf, "%llx, %llx, %llx",
|
||||||
&err_data_buffer[cpu].data1,
|
&err_data_buffer[cpu].data1,
|
||||||
&err_data_buffer[cpu].data2,
|
&err_data_buffer[cpu].data2,
|
||||||
&err_data_buffer[cpu].data3);
|
&err_data_buffer[cpu].data3);
|
||||||
|
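Side note on the %lx to %llx conversions above: in-kernel u64 is typedef'd to unsigned long long, so "%lx" no longer matches the argument type and trips printk format checking. A minimal userspace sketch of the same class of bug, using only standard C99 headers (illustrative, not part of the patch):

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

int main(void)
{
	uint64_t v = 0xdeadbeefcafef00dULL;

	/* Wrong on targets where unsigned long is 32-bit:
	 * printf("%lx\n", v); would read a truncated argument. */

	/* Portable: PRIx64 expands to the right conversion for uint64_t. */
	printf("%" PRIx64 "\n", v);

	/* Also fine everywhere: cast to a type with a known conversion. */
	printf("%llx\n", (unsigned long long)v);
	return 0;
}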
@@ -1824,7 +1824,7 @@ ia64_mca_cpu_init(void *cpu_data)
 			data = mca_bootmem();
 			first_time = 0;
 		} else
-			data = (void *)__get_free_pages(GFP_KERNEL,
+			data = (void *)__get_free_pages(GFP_ATOMIC,
 							get_order(sz));
 		if (!data)
 			panic("Could not allocate MCA memory for cpu %d\n",
@@ -43,7 +43,7 @@
 #include <asm/prom.h>
 
 #ifdef CONFIG_MIPS_ELF_APPENDED_DTB
-const char __section(".appended_dtb") __appended_dtb[0x100000];
+char __section(".appended_dtb") __appended_dtb[0x100000];
 #endif /* CONFIG_MIPS_ELF_APPENDED_DTB */
 
 struct cpuinfo_mips cpu_data[NR_CPUS] __read_mostly;
@@ -887,7 +887,8 @@ static long pSeries_lpar_hpte_updatepp(unsigned long slot,
 
 	want_v = hpte_encode_avpn(vpn, psize, ssize);
 
-	flags = (newpp & 7) | H_AVPN;
+	flags = (newpp & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO)) | H_AVPN;
+	flags |= (newpp & HPTE_R_KEY_HI) >> 48;
 	if (mmu_has_feature(MMU_FTR_KERNEL_RO))
 		/* Move pp0 into bit 8 (IBM 55) */
 		flags |= (newpp & HPTE_R_PP0) >> 55;
@@ -452,12 +452,28 @@ static int do_suspend(void)
 	return ret;
 }
 
+/**
+ * struct pseries_suspend_info - State shared between CPUs for join/suspend.
+ * @counter: Threads are to increment this upon resuming from suspend
+ *           or if an error is received from H_JOIN. The thread which performs
+ *           the first increment (i.e. sets it to 1) is responsible for
+ *           waking the other threads.
+ * @done: False if join/suspend is in progress. True if the operation is
+ *        complete (successful or not).
+ */
+struct pseries_suspend_info {
+	atomic_t counter;
+	bool done;
+};
+
 static int do_join(void *arg)
 {
-	atomic_t *counter = arg;
+	struct pseries_suspend_info *info = arg;
+	atomic_t *counter = &info->counter;
 	long hvrc;
 	int ret;
 
+retry:
 	/* Must ensure MSR.EE off for H_JOIN. */
 	hard_irq_disable();
 	hvrc = plpar_hcall_norets(H_JOIN);
@@ -473,8 +489,20 @@ static int do_join(void *arg)
 	case H_SUCCESS:
 		/*
 		 * The suspend is complete and this cpu has received a
-		 * prod.
+		 * prod, or we've received a stray prod from unrelated
+		 * code (e.g. paravirt spinlocks) and we need to join
+		 * again.
+		 *
+		 * This barrier orders the return from H_JOIN above vs
+		 * the load of info->done. It pairs with the barrier
+		 * in the wakeup/prod path below.
 		 */
+		smp_mb();
+		if (READ_ONCE(info->done) == false) {
+			pr_info_ratelimited("premature return from H_JOIN on CPU %i, retrying",
+					    smp_processor_id());
+			goto retry;
+		}
 		ret = 0;
 		break;
 	case H_BAD_MODE:
@@ -488,6 +516,13 @@ static int do_join(void *arg)
 
 	if (atomic_inc_return(counter) == 1) {
 		pr_info("CPU %u waking all threads\n", smp_processor_id());
+		WRITE_ONCE(info->done, true);
+		/*
+		 * This barrier orders the store to info->done vs subsequent
+		 * H_PRODs to wake the other CPUs. It pairs with the barrier
+		 * in the H_SUCCESS case above.
+		 */
+		smp_mb();
 		prod_others();
 	}
 	/*
@@ -535,11 +570,16 @@ static int pseries_suspend(u64 handle)
 	int ret;
 
 	while (true) {
-		atomic_t counter = ATOMIC_INIT(0);
+		struct pseries_suspend_info info;
 		unsigned long vasi_state;
 		int vasi_err;
 
-		ret = stop_machine(do_join, &counter, cpu_online_mask);
+		info = (struct pseries_suspend_info) {
+			.counter = ATOMIC_INIT(0),
+			.done = false,
+		};
+
+		ret = stop_machine(do_join, &info, cpu_online_mask);
 		if (ret == 0)
 			break;
 		/*
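The retry logic above rests on a classic publish/observe pairing: the first CPU out of H_JOIN stores info->done before prodding the others, and a prodded CPU re-checks info->done after H_JOIN returns, with full barriers on both sides. A rough userspace analogue using C11 atomics and pthreads; the names are invented for illustration and seq_cst atomics stand in for the smp_mb() pair (a sketch, not the kernel code):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_bool done = 0;
static atomic_int counter = 0;

static void *worker(void *arg)
{
	(void)arg;
	/* ... the join/suspend work would happen here ... */
	if (atomic_fetch_add(&counter, 1) == 0) {
		/* First thread to finish: publish 'done' before waking
		 * the others (the WRITE_ONCE + smp_mb() + prod step). */
		atomic_store(&done, 1);
		/* prod_others() equivalent would go here. */
	}
	return NULL;
}

int main(void)
{
	pthread_t t[4];
	for (int i = 0; i < 4; i++)
		pthread_create(&t[i], NULL, worker, NULL);
	for (int i = 0; i < 4; i++)
		pthread_join(t[i], NULL);
	/* A waiter seeing a spurious wakeup would re-check 'done' and retry,
	 * exactly like the READ_ONCE(info->done) test above. */
	printf("done=%d counter=%d\n", atomic_load(&done), atomic_load(&counter));
	return 0;
}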
@@ -314,7 +314,7 @@ endchoice
 # Common NUMA Features
 config NUMA
 	bool "NUMA Memory Allocation and Scheduler Support"
-	depends on SMP
+	depends on SMP && MMU
 	select GENERIC_ARCH_NUMA
 	select OF_NUMA
 	select ARCH_SUPPORTS_NUMA_BALANCING
@@ -306,7 +306,9 @@ do {								\
  * data types like structures or arrays.
  *
  * @ptr must have pointer-to-simple-variable type, and @x must be assignable
- * to the result of dereferencing @ptr.
+ * to the result of dereferencing @ptr. The value of @x is copied to avoid
+ * re-ordering where @x is evaluated inside the block that enables user-space
+ * access (thus bypassing user space protection if @x is a function).
  *
  * Caller must check the pointer with access_ok() before calling this
  * function.
@@ -316,12 +318,13 @@ do {								\
 #define __put_user(x, ptr)					\
 ({								\
 	__typeof__(*(ptr)) __user *__gu_ptr = (ptr);		\
+	__typeof__(*__gu_ptr) __val = (x);			\
 	long __pu_err = 0;					\
 								\
 	__chk_user_ptr(__gu_ptr);				\
 								\
 	__enable_user_access();					\
-	__put_user_nocheck(x, __gu_ptr, __pu_err);		\
+	__put_user_nocheck(__val, __gu_ptr, __pu_err);		\
 	__disable_user_access();				\
 								\
 	__pu_err;						\
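The new __val temporary pins down two things: @x is evaluated exactly once, and it is evaluated before __enable_user_access() opens the user-access window, so a function call hidden in @x cannot run with user-space protection disabled. A toy userspace sketch of the single-evaluation half, assuming GCC's __typeof__ extension (illustrative only):

#include <stdio.h>

/* Evaluates 'x' twice: a side-effecting argument runs twice. */
#define BAD_STORE(x, p)  do { *(p) = (x); printf("stored %d\n", (x)); } while (0)

/* Evaluates 'x' exactly once, mirroring the __val copy in __put_user(). */
#define GOOD_STORE(x, p) do {				\
	__typeof__(*(p)) __val = (x);			\
	*(p) = __val;					\
	printf("stored %d\n", __val);			\
} while (0)

int main(void)
{
	int v = 0, out;

	BAD_STORE(++v, &out);	/* v ends up 2: ++v ran twice */
	v = 0;
	GOOD_STORE(++v, &out);	/* v ends up 1: ++v ran once */
	printf("v=%d out=%d\n", v, out);
	return 0;
}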
@@ -447,6 +447,7 @@ ENDPROC(__switch_to)
 #endif
 
 	.section ".rodata"
+	.align LGREG
 	/* Exception vector table */
 ENTRY(excp_vect_table)
 	RISCV_PTR do_trap_insn_misaligned
@@ -14,7 +14,7 @@
 
 #include <asm/stacktrace.h>
 
-register const unsigned long sp_in_global __asm__("sp");
+register unsigned long sp_in_global __asm__("sp");
 
 #ifdef CONFIG_FRAME_POINTER
 
@@ -216,7 +216,7 @@ void __init kasan_init(void)
 			break;
 
 		kasan_populate(kasan_mem_to_shadow(start), kasan_mem_to_shadow(end));
-	};
+	}
 
 	for (i = 0; i < PTRS_PER_PTE; i++)
 		set_pte(&kasan_early_shadow_pte[i],
@@ -6,7 +6,7 @@
 #include <vdso/datapage.h>
 
 struct arch_vdso_data {
-	__u64 tod_steering_delta;
+	__s64 tod_steering_delta;
 	__u64 tod_steering_end;
 };
 
@@ -80,10 +80,12 @@ void __init time_early_init(void)
 {
 	struct ptff_qto qto;
 	struct ptff_qui qui;
+	int cs;
 
 	/* Initialize TOD steering parameters */
 	tod_steering_end = tod_clock_base.tod;
-	vdso_data->arch_data.tod_steering_end = tod_steering_end;
+	for (cs = 0; cs < CS_BASES; cs++)
+		vdso_data[cs].arch_data.tod_steering_end = tod_steering_end;
 
 	if (!test_facility(28))
 		return;
@@ -366,6 +368,7 @@ static void clock_sync_global(unsigned long delta)
 {
 	unsigned long now, adj;
 	struct ptff_qto qto;
+	int cs;
 
 	/* Fixup the monotonic sched clock. */
 	tod_clock_base.eitod += delta;
@@ -381,7 +384,10 @@ static void clock_sync_global(unsigned long delta)
 		panic("TOD clock sync offset %li is too large to drift\n",
 		      tod_steering_delta);
 	tod_steering_end = now + (abs(tod_steering_delta) << 15);
-	vdso_data->arch_data.tod_steering_end = tod_steering_end;
+	for (cs = 0; cs < CS_BASES; cs++) {
+		vdso_data[cs].arch_data.tod_steering_end = tod_steering_end;
+		vdso_data[cs].arch_data.tod_steering_delta = tod_steering_delta;
+	}
 
 	/* Update LPAR offset. */
 	if (ptff_query(PTFF_QTO) && ptff(&qto, sizeof(qto), PTFF_QTO) == 0)
@@ -27,7 +27,7 @@ endif
 REALMODE_CFLAGS	:= -m16 -g -Os -DDISABLE_BRANCH_PROFILING \
 		   -Wall -Wstrict-prototypes -march=i386 -mregparm=3 \
 		   -fno-strict-aliasing -fomit-frame-pointer -fno-pic \
-		   -mno-mmx -mno-sse
+		   -mno-mmx -mno-sse $(call cc-option,-fcf-protection=none)
 
 REALMODE_CFLAGS += -ffreestanding
 REALMODE_CFLAGS += -fno-stack-protector
@@ -132,6 +132,7 @@ void native_play_dead(void);
 void play_dead_common(void);
 void wbinvd_on_cpu(int cpu);
 int wbinvd_on_all_cpus(void);
+bool wakeup_cpu0(void);
 
 void native_smp_send_reschedule(int cpu);
 void native_send_call_func_ipi(const struct cpumask *mask);
@@ -86,18 +86,6 @@ clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
 }
 #endif
 
-/*
- * The maximum amount of extra memory compared to the base size. The
- * main scaling factor is the size of struct page. At extreme ratios
- * of base:extra, all the base memory can be filled with page
- * structures for the extra memory, leaving no space for anything
- * else.
- *
- * 10x seems like a reasonable balance between scaling flexibility and
- * leaving a practically usable system.
- */
-#define XEN_EXTRA_MEM_RATIO	(10)
-
 /*
  * Helper functions to write or read unsigned long values to/from
  * memory, when the access may fault.
@@ -1554,10 +1554,18 @@ void __init acpi_boot_table_init(void)
 	/*
 	 * Initialize the ACPI boot-time table parser.
 	 */
-	if (acpi_table_init()) {
+	if (acpi_locate_initial_tables())
 		disable_acpi();
-		return;
-	}
+	else
+		acpi_reserve_initial_tables();
+}
+
+int __init early_acpi_boot_init(void)
+{
+	if (acpi_disabled)
+		return 1;
+
+	acpi_table_init_complete();
 
 	acpi_table_parse(ACPI_SIG_BOOT, acpi_parse_sbf);
 
@@ -1570,18 +1578,9 @@
 		} else {
 			printk(KERN_WARNING PREFIX "Disabling ACPI support\n");
 			disable_acpi();
-			return;
+			return 1;
 		}
 	}
-}
-
-int __init early_acpi_boot_init(void)
-{
-	/*
-	 * If acpi_disabled, bail out
-	 */
-	if (acpi_disabled)
-		return 1;
 
 	/*
 	 * Process the Multiple APIC Description Table (MADT), if present
@@ -1045,6 +1045,9 @@ void __init setup_arch(char **cmdline_p)
 
 	cleanup_highmap();
 
+	/* Look for ACPI tables and reserve memory occupied by them. */
+	acpi_boot_table_init();
+
 	memblock_set_current_limit(ISA_END_ADDRESS);
 	e820__memblock_setup();
 
@@ -1136,11 +1139,6 @@ void __init setup_arch(char **cmdline_p)
 
 	early_platform_quirks();
 
-	/*
-	 * Parse the ACPI tables for possible boot-time SMP configuration.
-	 */
-	acpi_boot_table_init();
-
 	early_acpi_boot_init();
 
 	initmem_init();
@@ -1659,7 +1659,7 @@ void play_dead_common(void)
 	local_irq_disable();
 }
 
-static bool wakeup_cpu0(void)
+bool wakeup_cpu0(void)
 {
 	if (smp_processor_id() == 0 && enable_start_cpu0)
 		return true;
@@ -1,6 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
 
-ccflags-y += -Iarch/x86/kvm
+ccflags-y += -I $(srctree)/arch/x86/kvm
 ccflags-$(CONFIG_KVM_WERROR) += -Werror
 
 ifeq ($(CONFIG_FRAME_POINTER),y)
@@ -5884,6 +5884,7 @@ static void kvm_recover_nx_lpages(struct kvm *kvm)
 	struct kvm_mmu_page *sp;
 	unsigned int ratio;
 	LIST_HEAD(invalid_list);
+	bool flush = false;
 	ulong to_zap;
 
 	rcu_idx = srcu_read_lock(&kvm->srcu);
@@ -5905,19 +5906,19 @@ static void kvm_recover_nx_lpages(struct kvm *kvm)
 				      lpage_disallowed_link);
 		WARN_ON_ONCE(!sp->lpage_disallowed);
 		if (is_tdp_mmu_page(sp)) {
-			kvm_tdp_mmu_zap_gfn_range(kvm, sp->gfn,
-				sp->gfn + KVM_PAGES_PER_HPAGE(sp->role.level));
+			flush = kvm_tdp_mmu_zap_sp(kvm, sp);
 		} else {
 			kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
 			WARN_ON_ONCE(sp->lpage_disallowed);
 		}
 
 		if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
-			kvm_mmu_commit_zap_page(kvm, &invalid_list);
+			kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
 			cond_resched_rwlock_write(&kvm->mmu_lock);
+			flush = false;
 		}
 	}
-	kvm_mmu_commit_zap_page(kvm, &invalid_list);
+	kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
 
 	write_unlock(&kvm->mmu_lock);
 	srcu_read_unlock(&kvm->srcu, rcu_idx);
@@ -86,7 +86,7 @@ static inline struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
 	list_for_each_entry(_root, &_kvm->arch.tdp_mmu_roots, link)
 
 static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
-			  gfn_t start, gfn_t end, bool can_yield);
+			  gfn_t start, gfn_t end, bool can_yield, bool flush);
 
 void kvm_tdp_mmu_free_root(struct kvm *kvm, struct kvm_mmu_page *root)
 {
@@ -99,7 +99,7 @@ void kvm_tdp_mmu_free_root(struct kvm *kvm, struct kvm_mmu_page *root)
 
 	list_del(&root->link);
 
-	zap_gfn_range(kvm, root, 0, max_gfn, false);
+	zap_gfn_range(kvm, root, 0, max_gfn, false, false);
 
 	free_page((unsigned long)root->spt);
 	kmem_cache_free(mmu_page_header_cache, root);
@@ -668,20 +668,21 @@ static inline bool tdp_mmu_iter_cond_resched(struct kvm *kvm,
 * scheduler needs the CPU or there is contention on the MMU lock. If this
 * function cannot yield, it will not release the MMU lock or reschedule and
 * the caller must ensure it does not supply too large a GFN range, or the
- * operation can cause a soft lockup.
+ * operation can cause a soft lockup. Note, in some use cases a flush may be
+ * required by prior actions. Ensure the pending flush is performed prior to
+ * yielding.
 */
 static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
-			  gfn_t start, gfn_t end, bool can_yield)
+			  gfn_t start, gfn_t end, bool can_yield, bool flush)
 {
 	struct tdp_iter iter;
-	bool flush_needed = false;
 
 	rcu_read_lock();
 
 	tdp_root_for_each_pte(iter, root, start, end) {
 		if (can_yield &&
-		    tdp_mmu_iter_cond_resched(kvm, &iter, flush_needed)) {
-			flush_needed = false;
+		    tdp_mmu_iter_cond_resched(kvm, &iter, flush)) {
+			flush = false;
 			continue;
 		}
 
@@ -699,11 +700,11 @@ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
 			continue;
 
 		tdp_mmu_set_spte(kvm, &iter, 0);
-		flush_needed = true;
+		flush = true;
 	}
 
 	rcu_read_unlock();
-	return flush_needed;
+	return flush;
 }
 
 /*
@@ -712,13 +713,14 @@ static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
 * SPTEs have been cleared and a TLB flush is needed before releasing the
 * MMU lock.
 */
-bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start, gfn_t end)
+bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start, gfn_t end,
+				 bool can_yield)
 {
 	struct kvm_mmu_page *root;
 	bool flush = false;
 
 	for_each_tdp_mmu_root_yield_safe(kvm, root)
-		flush |= zap_gfn_range(kvm, root, start, end, true);
+		flush = zap_gfn_range(kvm, root, start, end, can_yield, flush);
 
 	return flush;
 }
@@ -930,7 +932,7 @@ static int zap_gfn_range_hva_wrapper(struct kvm *kvm,
 				     struct kvm_mmu_page *root, gfn_t start,
 				     gfn_t end, unsigned long unused)
 {
-	return zap_gfn_range(kvm, root, start, end, false);
+	return zap_gfn_range(kvm, root, start, end, false, false);
 }
 
 int kvm_tdp_mmu_zap_hva_range(struct kvm *kvm, unsigned long start,
@@ -8,7 +8,29 @@
 hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu);
 void kvm_tdp_mmu_free_root(struct kvm *kvm, struct kvm_mmu_page *root);
 
-bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start, gfn_t end);
+bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start, gfn_t end,
+				 bool can_yield);
+static inline bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start,
+					     gfn_t end)
+{
+	return __kvm_tdp_mmu_zap_gfn_range(kvm, start, end, true);
+}
+static inline bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
+{
+	gfn_t end = sp->gfn + KVM_PAGES_PER_HPAGE(sp->role.level);
+
+	/*
+	 * Don't allow yielding, as the caller may have a flush pending. Note,
+	 * if mmu_lock is held for write, zapping will never yield in this case,
+	 * but explicitly disallow it for safety. The TDP MMU does not yield
+	 * until it has made forward progress (steps sideways), and when zapping
+	 * a single shadow page that it's guaranteed to see (thus the mmu_lock
+	 * requirement), its "step sideways" will always step beyond the bounds
+	 * of the shadow page's gfn range and stop iterating before yielding.
+	 */
+	lockdep_assert_held_write(&kvm->mmu_lock);
+	return __kvm_tdp_mmu_zap_gfn_range(kvm, sp->gfn, end, false);
+}
 void kvm_tdp_mmu_zap_all(struct kvm *kvm);
 
 int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
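Worth noting the idiom in the header above: the old entry point keeps its name and behavior as a static inline wrapper while a double-underscore variant grows the new can_yield parameter, so only the call sites that care have to change. A toy standalone sketch of the pattern with invented names (not KVM code):

#include <stdbool.h>
#include <stdio.h>

/* Extended primitive with the new knob, like __kvm_tdp_mmu_zap_gfn_range(). */
static bool __zap_range(unsigned long start, unsigned long end, bool can_yield)
{
	printf("zap [%lx, %lx) can_yield=%d\n", start, end, can_yield);
	return end > start;	/* "TLB flush needed" */
}

/* Existing callers keep the old, simpler signature via an inline wrapper. */
static inline bool zap_range(unsigned long start, unsigned long end)
{
	return __zap_range(start, end, true);
}

int main(void)
{
	bool flush = zap_range(0x1000, 0x2000);       /* default: may yield */
	flush |= __zap_range(0x2000, 0x3000, false);  /* caller has a flush pending */
	printf("flush=%d\n", flush);
	return 0;
}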
@@ -246,11 +246,18 @@ static bool nested_vmcb_check_controls(struct vmcb_control_area *control)
 	return true;
 }
 
-static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb12)
+static bool nested_vmcb_check_save(struct vcpu_svm *svm, struct vmcb *vmcb12)
 {
 	struct kvm_vcpu *vcpu = &svm->vcpu;
 	bool vmcb12_lma;
 
+	/*
+	 * FIXME: these should be done after copying the fields,
+	 * to avoid TOC/TOU races.  For these save area checks
+	 * the possible damage is limited since kvm_set_cr0 and
+	 * kvm_set_cr4 handle failure; EFER_SVME is an exception
+	 * so it is force-set later in nested_prepare_vmcb_save.
+	 */
 	if ((vmcb12->save.efer & EFER_SVME) == 0)
 		return false;
 
@@ -271,7 +278,7 @@ static bool nested_vmcb_check_save(struct vcpu_svm *svm, struct vmcb *vmcb12)
 	if (!kvm_is_valid_cr4(&svm->vcpu, vmcb12->save.cr4))
 		return false;
 
-	return nested_vmcb_check_controls(&vmcb12->control);
+	return true;
 }
 
 static void load_nested_vmcb_control(struct vcpu_svm *svm,
@@ -396,7 +403,14 @@ static void nested_prepare_vmcb_save(struct vcpu_svm *svm, struct vmcb *vmcb12)
 	svm->vmcb->save.gdtr = vmcb12->save.gdtr;
 	svm->vmcb->save.idtr = vmcb12->save.idtr;
 	kvm_set_rflags(&svm->vcpu, vmcb12->save.rflags | X86_EFLAGS_FIXED);
-	svm_set_efer(&svm->vcpu, vmcb12->save.efer);
+
+	/*
+	 * Force-set EFER_SVME even though it is checked earlier on the
+	 * VMCB12, because the guest can flip the bit between the check
+	 * and now.  Clearing EFER_SVME would call svm_free_nested.
+	 */
+	svm_set_efer(&svm->vcpu, vmcb12->save.efer | EFER_SVME);
+
 	svm_set_cr0(&svm->vcpu, vmcb12->save.cr0);
 	svm_set_cr4(&svm->vcpu, vmcb12->save.cr4);
 	svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = vmcb12->save.cr2;
@@ -468,7 +482,6 @@ int enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb12_gpa,
 
 
 	svm->nested.vmcb12_gpa = vmcb12_gpa;
-	load_nested_vmcb_control(svm, &vmcb12->control);
 	nested_prepare_vmcb_control(svm);
 	nested_prepare_vmcb_save(svm, vmcb12);
 
@@ -515,7 +528,10 @@ int nested_svm_vmrun(struct vcpu_svm *svm)
 	if (WARN_ON_ONCE(!svm->nested.initialized))
 		return -EINVAL;
 
-	if (!nested_vmcb_checks(svm, vmcb12)) {
+	load_nested_vmcb_control(svm, &vmcb12->control);
+
+	if (!nested_vmcb_check_save(svm, vmcb12) ||
+	    !nested_vmcb_check_controls(&svm->nested.ctl)) {
 		vmcb12->control.exit_code    = SVM_EXIT_ERR;
 		vmcb12->control.exit_code_hi = 0;
 		vmcb12->control.exit_info_1  = 0;
@@ -1209,6 +1225,8 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
 	 */
 	if (!(save->cr0 & X86_CR0_PG))
 		goto out_free;
+	if (!(save->efer & EFER_SVME))
+		goto out_free;
 
 	/*
 	 * All checks done, we can enter guest mode. L1 control fields
@@ -98,6 +98,8 @@ static enum index msr_to_index(u32 msr)
 static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
 					     enum pmu_type type)
 {
+	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);
+
 	switch (msr) {
 	case MSR_F15H_PERF_CTL0:
 	case MSR_F15H_PERF_CTL1:
@@ -105,6 +107,9 @@ static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
 	case MSR_F15H_PERF_CTL3:
 	case MSR_F15H_PERF_CTL4:
 	case MSR_F15H_PERF_CTL5:
+		if (!guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE))
+			return NULL;
+		fallthrough;
 	case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
 		if (type != PMU_TYPE_EVNTSEL)
 			return NULL;
@@ -115,6 +120,9 @@ static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
 	case MSR_F15H_PERF_CTR3:
 	case MSR_F15H_PERF_CTR4:
 	case MSR_F15H_PERF_CTR5:
+		if (!guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE))
+			return NULL;
+		fallthrough;
 	case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
 		if (type != PMU_TYPE_COUNTER)
 			return NULL;
@@ -271,8 +271,7 @@ static struct kmem_cache *x86_emulator_cache;
 * When called, it means the previous get/set msr reached an invalid msr.
 * Return true if we want to ignore/silent this failed msr access.
 */
-static bool kvm_msr_ignored_check(struct kvm_vcpu *vcpu, u32 msr,
-				  u64 data, bool write)
+static bool kvm_msr_ignored_check(u32 msr, u64 data, bool write)
 {
 	const char *op = write ? "wrmsr" : "rdmsr";
 
@@ -1445,7 +1444,7 @@ static int do_get_msr_feature(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
 	if (r == KVM_MSR_RET_INVALID) {
 		/* Unconditionally clear the output for simplicity */
 		*data = 0;
-		if (kvm_msr_ignored_check(vcpu, index, 0, false))
+		if (kvm_msr_ignored_check(index, 0, false))
 			r = 0;
 	}
 
@@ -1620,7 +1619,7 @@ static int kvm_set_msr_ignored_check(struct kvm_vcpu *vcpu,
 	int ret = __kvm_set_msr(vcpu, index, data, host_initiated);
 
 	if (ret == KVM_MSR_RET_INVALID)
-		if (kvm_msr_ignored_check(vcpu, index, data, true))
+		if (kvm_msr_ignored_check(index, data, true))
 			ret = 0;
 
 	return ret;
@@ -1658,7 +1657,7 @@ static int kvm_get_msr_ignored_check(struct kvm_vcpu *vcpu,
 	if (ret == KVM_MSR_RET_INVALID) {
 		/* Unconditionally clear *data for simplicity */
 		*data = 0;
-		if (kvm_msr_ignored_check(vcpu, index, 0, false))
+		if (kvm_msr_ignored_check(index, 0, false))
 			ret = 0;
 	}
 
@@ -2329,7 +2328,7 @@ static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 data)
 	kvm_vcpu_write_tsc_offset(vcpu, offset);
 	raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
 
-	spin_lock(&kvm->arch.pvclock_gtod_sync_lock);
+	spin_lock_irqsave(&kvm->arch.pvclock_gtod_sync_lock, flags);
 	if (!matched) {
 		kvm->arch.nr_vcpus_matched_tsc = 0;
 	} else if (!already_matched) {
@@ -2337,7 +2336,7 @@ static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 data)
 	}
 
 	kvm_track_tsc_matching(vcpu);
-	spin_unlock(&kvm->arch.pvclock_gtod_sync_lock);
+	spin_unlock_irqrestore(&kvm->arch.pvclock_gtod_sync_lock, flags);
 }
 
 static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,
@@ -2559,13 +2558,16 @@ static void kvm_gen_update_masterclock(struct kvm *kvm)
 	int i;
 	struct kvm_vcpu *vcpu;
 	struct kvm_arch *ka = &kvm->arch;
+	unsigned long flags;
 
 	kvm_hv_invalidate_tsc_page(kvm);
 
-	spin_lock(&ka->pvclock_gtod_sync_lock);
 	kvm_make_mclock_inprogress_request(kvm);
+
 	/* no guest entries from this point */
+	spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
 	pvclock_update_vm_gtod_copy(kvm);
+	spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
 
 	kvm_for_each_vcpu(i, vcpu, kvm)
 		kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
@@ -2573,8 +2575,6 @@ static void kvm_gen_update_masterclock(struct kvm *kvm)
 	/* guest entries allowed */
 	kvm_for_each_vcpu(i, vcpu, kvm)
 		kvm_clear_request(KVM_REQ_MCLOCK_INPROGRESS, vcpu);
-
-	spin_unlock(&ka->pvclock_gtod_sync_lock);
 #endif
 }
 
@@ -2582,17 +2582,18 @@ u64 get_kvmclock_ns(struct kvm *kvm)
 {
 	struct kvm_arch *ka = &kvm->arch;
 	struct pvclock_vcpu_time_info hv_clock;
+	unsigned long flags;
 	u64 ret;
 
-	spin_lock(&ka->pvclock_gtod_sync_lock);
+	spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
 	if (!ka->use_master_clock) {
-		spin_unlock(&ka->pvclock_gtod_sync_lock);
+		spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
 		return get_kvmclock_base_ns() + ka->kvmclock_offset;
 	}
 
 	hv_clock.tsc_timestamp = ka->master_cycle_now;
 	hv_clock.system_time = ka->master_kernel_ns + ka->kvmclock_offset;
-	spin_unlock(&ka->pvclock_gtod_sync_lock);
+	spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
 
 	/* both __this_cpu_read() and rdtsc() should be on the same cpu */
 	get_cpu();
@@ -2686,13 +2687,13 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
 	 * If the host uses TSC clock, then passthrough TSC as stable
 	 * to the guest.
 	 */
-	spin_lock(&ka->pvclock_gtod_sync_lock);
+	spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
 	use_master_clock = ka->use_master_clock;
 	if (use_master_clock) {
 		host_tsc = ka->master_cycle_now;
 		kernel_ns = ka->master_kernel_ns;
 	}
-	spin_unlock(&ka->pvclock_gtod_sync_lock);
+	spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
 
 	/* Keep irq disabled to prevent changes to the clock */
 	local_irq_save(flags);
@@ -5726,6 +5727,7 @@ set_pit2_out:
 	}
 #endif
 	case KVM_SET_CLOCK: {
+		struct kvm_arch *ka = &kvm->arch;
 		struct kvm_clock_data user_ns;
 		u64 now_ns;
 
@@ -5744,8 +5746,22 @@ set_pit2_out:
 		 * pvclock_update_vm_gtod_copy().
 		 */
 		kvm_gen_update_masterclock(kvm);
-		now_ns = get_kvmclock_ns(kvm);
-		kvm->arch.kvmclock_offset += user_ns.clock - now_ns;
+
+		/*
+		 * This pairs with kvm_guest_time_update(): when masterclock is
+		 * in use, we use master_kernel_ns + kvmclock_offset to set
+		 * unsigned 'system_time' so if we use get_kvmclock_ns() (which
+		 * is slightly ahead) here we risk going negative on unsigned
+		 * 'system_time' when 'user_ns.clock' is very small.
+		 */
+		spin_lock_irq(&ka->pvclock_gtod_sync_lock);
+		if (kvm->arch.use_master_clock)
+			now_ns = ka->master_kernel_ns;
+		else
+			now_ns = get_kvmclock_base_ns();
+		ka->kvmclock_offset = user_ns.clock - now_ns;
+		spin_unlock_irq(&ka->pvclock_gtod_sync_lock);
+
 		kvm_make_all_cpus_request(kvm, KVM_REQ_CLOCK_UPDATE);
 		break;
 	}
@@ -7724,6 +7740,7 @@ static void kvm_hyperv_tsc_notifier(void)
 	struct kvm *kvm;
 	struct kvm_vcpu *vcpu;
 	int cpu;
+	unsigned long flags;
 
 	mutex_lock(&kvm_lock);
 	list_for_each_entry(kvm, &vm_list, vm_list)
@@ -7739,17 +7756,15 @@ static void kvm_hyperv_tsc_notifier(void)
 	list_for_each_entry(kvm, &vm_list, vm_list) {
 		struct kvm_arch *ka = &kvm->arch;
 
-		spin_lock(&ka->pvclock_gtod_sync_lock);
-
+		spin_lock_irqsave(&ka->pvclock_gtod_sync_lock, flags);
 		pvclock_update_vm_gtod_copy(kvm);
+		spin_unlock_irqrestore(&ka->pvclock_gtod_sync_lock, flags);
 
 		kvm_for_each_vcpu(cpu, vcpu, kvm)
 			kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
 
 		kvm_for_each_vcpu(cpu, vcpu, kvm)
 			kvm_clear_request(KVM_REQ_MCLOCK_INPROGRESS, vcpu);
-
-		spin_unlock(&ka->pvclock_gtod_sync_lock);
 	}
 	mutex_unlock(&kvm_lock);
 }
@@ -250,7 +250,6 @@ static inline bool kvm_vcpu_latch_init(struct kvm_vcpu *vcpu)
 void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock, int sec_hi_ofs);
 void kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);
 
-void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr);
 u64 get_kvmclock_ns(struct kvm *kvm);
 
 int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
@@ -262,7 +262,7 @@ static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc)
 	if (pgprot_val(old_prot) == pgprot_val(new_prot))
 		return;
 
-	pa = pfn << page_level_shift(level);
+	pa = pfn << PAGE_SHIFT;
 	size = page_level_size(level);
 
 	/*
@@ -1936,7 +1936,7 @@ static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog,
 * add rsp, 8                      // skip eth_type_trans's frame
 * ret                             // return to its caller
 */
-int arch_prepare_bpf_trampoline(void *image, void *image_end,
+int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end,
 				const struct btf_func_model *m, u32 flags,
 				struct bpf_tramp_progs *tprogs,
 				void *orig_call)
@@ -1975,6 +1975,15 @@ int arch_prepare_bpf_trampoline(void *image, void *image_end,
 
 	save_regs(m, &prog, nr_args, stack_size);
 
+	if (flags & BPF_TRAMP_F_CALL_ORIG) {
+		/* arg1: mov rdi, im */
+		emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
+		if (emit_call(&prog, __bpf_tramp_enter, prog)) {
+			ret = -EINVAL;
+			goto cleanup;
+		}
+	}
+
 	if (fentry->nr_progs)
 		if (invoke_bpf(m, &prog, fentry, stack_size))
 			return -EINVAL;
@@ -1993,8 +2002,7 @@ int arch_prepare_bpf_trampoline(void *image, void *image_end,
 	}
 
 	if (flags & BPF_TRAMP_F_CALL_ORIG) {
-		if (fentry->nr_progs || fmod_ret->nr_progs)
-			restore_regs(m, &prog, nr_args, stack_size);
+		restore_regs(m, &prog, nr_args, stack_size);
 
 		/* call original function */
 		if (emit_call(&prog, orig_call, prog)) {
@@ -2003,6 +2011,9 @@ int arch_prepare_bpf_trampoline(void *image, void *image_end,
 		}
 		/* remember return value in a stack for bpf prog to access */
 		emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
+		im->ip_after_call = prog;
+		memcpy(prog, ideal_nops[NOP_ATOMIC5], X86_PATCH_SIZE);
+		prog += X86_PATCH_SIZE;
 	}
 
 	if (fmod_ret->nr_progs) {
@@ -2033,9 +2044,17 @@ int arch_prepare_bpf_trampoline(void *image, void *image_end,
 	 * the return value is only updated on the stack and still needs to be
 	 * restored to R0.
 	 */
-	if (flags & BPF_TRAMP_F_CALL_ORIG)
+	if (flags & BPF_TRAMP_F_CALL_ORIG) {
+		im->ip_epilogue = prog;
+		/* arg1: mov rdi, im */
+		emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
+		if (emit_call(&prog, __bpf_tramp_exit, prog)) {
+			ret = -EINVAL;
+			goto cleanup;
+		}
 		/* restore original return value back into RAX */
 		emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);
+	}
 
 	EMIT1(0x5B); /* pop rbx */
 	EMIT1(0xC9); /* leave */
@@ -2225,7 +2244,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 		padding = true;
 		goto skip_init_addrs;
 	}
-	addrs = kmalloc_array(prog->len + 1, sizeof(*addrs), GFP_KERNEL);
+	addrs = kvmalloc_array(prog->len + 1, sizeof(*addrs), GFP_KERNEL);
 	if (!addrs) {
 		prog = orig_prog;
 		goto out_addrs;
@@ -2317,7 +2336,7 @@ out_image:
 		if (image)
 			bpf_prog_fill_jited_linfo(prog, addrs + 1);
 out_addrs:
-		kfree(addrs);
+		kvfree(addrs);
 		kfree(jit_data);
 		prog->aux->jit_data = NULL;
 	}
@@ -98,8 +98,8 @@ EXPORT_SYMBOL_GPL(xen_p2m_size);
 unsigned long xen_max_p2m_pfn __read_mostly;
 EXPORT_SYMBOL_GPL(xen_max_p2m_pfn);
 
-#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG_LIMIT
-#define P2M_LIMIT CONFIG_XEN_BALLOON_MEMORY_HOTPLUG_LIMIT
+#ifdef CONFIG_XEN_MEMORY_HOTPLUG_LIMIT
+#define P2M_LIMIT CONFIG_XEN_MEMORY_HOTPLUG_LIMIT
 #else
 #define P2M_LIMIT 0
 #endif
@@ -416,9 +416,6 @@ void __init xen_vmalloc_p2m_tree(void)
 	xen_p2m_last_pfn = xen_max_p2m_pfn;
 
 	p2m_limit = (phys_addr_t)P2M_LIMIT * 1024 * 1024 * 1024 / PAGE_SIZE;
-	if (!p2m_limit && IS_ENABLED(CONFIG_XEN_UNPOPULATED_ALLOC))
-		p2m_limit = xen_start_info->nr_pages * XEN_EXTRA_MEM_RATIO;
-
 	vm.flags = VM_ALLOC;
 	vm.size = ALIGN(sizeof(unsigned long) * max(xen_max_p2m_pfn, p2m_limit),
 			PMD_SIZE * PMDS_PER_MID_PAGE);
@@ -59,6 +59,18 @@ static struct {
 } xen_remap_buf __initdata __aligned(PAGE_SIZE);
 static unsigned long xen_remap_mfn __initdata = INVALID_P2M_ENTRY;
 
+/*
+ * The maximum amount of extra memory compared to the base size. The
+ * main scaling factor is the size of struct page. At extreme ratios
+ * of base:extra, all the base memory can be filled with page
+ * structures for the extra memory, leaving no space for anything
+ * else.
+ *
+ * 10x seems like a reasonable balance between scaling flexibility and
+ * leaving a practically usable system.
+ */
+#define EXTRA_MEM_RATIO	(10)
+
 static bool xen_512gb_limit __initdata = IS_ENABLED(CONFIG_XEN_512GB);
 
 static void __init xen_parse_512gb(void)
@@ -778,13 +790,13 @@ char * __init xen_memory_setup(void)
 		extra_pages += max_pages - max_pfn;
 
 	/*
-	 * Clamp the amount of extra memory to a XEN_EXTRA_MEM_RATIO
+	 * Clamp the amount of extra memory to a EXTRA_MEM_RATIO
 	 * factor the base size.
 	 *
 	 * Make sure we have no memory above max_pages, as this area
 	 * isn't handled by the p2m management.
 	 */
-	extra_pages = min3(XEN_EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
+	extra_pages = min3(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
 			   extra_pages, max_pages - max_pfn);
 	i = 0;
 	addr = xen_e820_table.entries[0].addr;
@@ -99,37 +99,6 @@
 	LOAD_CP_REGS_TAB(6)
 	LOAD_CP_REGS_TAB(7)
 
-/*
- * coprocessor_flush(struct thread_info*, index)
- *                             a2        a3
- *
- * Save coprocessor registers for coprocessor 'index'.
- * The register values are saved to or loaded from the coprocessor area
- * inside the task_info structure.
- *
- * Note that this function doesn't update the coprocessor_owner information!
- *
- */
-
-ENTRY(coprocessor_flush)
-
-	/* reserve 4 bytes on stack to save a0 */
-	abi_entry(4)
-
-	s32i	a0, a1, 0
-	movi	a0, .Lsave_cp_regs_jump_table
-	addx8	a3, a3, a0
-	l32i	a4, a3, 4
-	l32i	a3, a3, 0
-	add	a2, a2, a4
-	beqz	a3, 1f
-	callx0	a3
-1:	l32i	a0, a1, 0
-
-	abi_ret(4)
-
-ENDPROC(coprocessor_flush)
-
 /*
  * Entry condition:
  *
@@ -245,6 +214,39 @@ ENTRY(fast_coprocessor)
 
 ENDPROC(fast_coprocessor)
 
+	.text
+
+/*
+ * coprocessor_flush(struct thread_info*, index)
+ *                             a2        a3
+ *
+ * Save coprocessor registers for coprocessor 'index'.
+ * The register values are saved to or loaded from the coprocessor area
+ * inside the task_info structure.
+ *
+ * Note that this function doesn't update the coprocessor_owner information!
+ *
+ */
+
+ENTRY(coprocessor_flush)
+
+	/* reserve 4 bytes on stack to save a0 */
+	abi_entry(4)
+
+	s32i	a0, a1, 0
+	movi	a0, .Lsave_cp_regs_jump_table
+	addx8	a3, a3, a0
+	l32i	a4, a3, 4
+	l32i	a3, a3, 0
+	add	a2, a2, a4
+	beqz	a3, 1f
+	callx0	a3
+1:	l32i	a0, a1, 0
+
+	abi_ret(4)
+
+ENDPROC(coprocessor_flush)
+
 	.data
 
 ENTRY(coprocessor_owner)
@@ -112,8 +112,11 @@ good_area:
 	 */
 	fault = handle_mm_fault(vma, address, flags, regs);
 
-	if (fault_signal_pending(fault, regs))
+	if (fault_signal_pending(fault, regs)) {
+		if (!user_mode(regs))
+			goto bad_page_fault;
 		return;
+	}
 
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
23	block/bio.c
@ -277,7 +277,7 @@ static struct bio *__bio_chain_endio(struct bio *bio)
|
|||||||
{
|
{
|
||||||
struct bio *parent = bio->bi_private;
|
struct bio *parent = bio->bi_private;
|
||||||
|
|
||||||
if (!parent->bi_status)
|
if (bio->bi_status && !parent->bi_status)
|
||||||
parent->bi_status = bio->bi_status;
|
parent->bi_status = bio->bi_status;
|
||||||
bio_put(bio);
|
bio_put(bio);
|
||||||
return parent;
|
return parent;
|
||||||
@@ -949,7 +949,7 @@ void bio_release_pages(struct bio *bio, bool mark_dirty)
 }
 EXPORT_SYMBOL_GPL(bio_release_pages);
 
-static int bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter)
+static void __bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter)
 {
 	WARN_ON_ONCE(bio->bi_max_vecs);
 
@@ -959,11 +959,26 @@ static int bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter)
 	bio->bi_iter.bi_size = iter->count;
 	bio_set_flag(bio, BIO_NO_PAGE_REF);
 	bio_set_flag(bio, BIO_CLONED);
+}
 
+static int bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter)
+{
+	__bio_iov_bvec_set(bio, iter);
 	iov_iter_advance(iter, iter->count);
 	return 0;
 }
 
+static int bio_iov_bvec_set_append(struct bio *bio, struct iov_iter *iter)
+{
+	struct request_queue *q = bio->bi_bdev->bd_disk->queue;
+	struct iov_iter i = *iter;
+
+	iov_iter_truncate(&i, queue_max_zone_append_sectors(q) << 9);
+	__bio_iov_bvec_set(bio, &i);
+	iov_iter_advance(iter, i.count);
+	return 0;
+}
+
 #define PAGE_PTRS_PER_BVEC	(sizeof(struct bio_vec) / sizeof(struct page *))
 
 /**
@@ -1094,8 +1109,8 @@ int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
 	int ret = 0;
 
 	if (iov_iter_is_bvec(iter)) {
-		if (WARN_ON_ONCE(bio_op(bio) == REQ_OP_ZONE_APPEND))
-			return -EINVAL;
+		if (bio_op(bio) == REQ_OP_ZONE_APPEND)
+			return bio_iov_bvec_set_append(bio, iter);
 		return bio_iov_bvec_set(bio, iter);
 	}
 
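The new bio_iov_bvec_set_append() above caps how much of the iterator one bio may carry at the queue's zone-append limit, then advances the caller's iterator by only what was consumed, so the caller naturally resubmits the remainder. A self-contained sketch of that cap-then-advance batching, with plain byte counts standing in for iov_iter and hypothetical names:

#include <stdio.h>
#include <stddef.h>

/* A toy iterator over a byte range. */
struct toy_iter {
	size_t off;    /* bytes consumed so far */
	size_t count;  /* bytes remaining */
};

/* Consume at most max_batch bytes; returns how much this "bio" took. */
static size_t take_batch(struct toy_iter *iter, size_t max_batch)
{
	struct toy_iter i = *iter;      /* local copy, like `i = *iter` above */

	if (i.count > max_batch)        /* iov_iter_truncate() analogue */
		i.count = max_batch;

	iter->off += i.count;           /* iov_iter_advance() analogue */
	iter->count -= i.count;
	return i.count;
}

int main(void)
{
	struct toy_iter iter = { .off = 0, .count = 10000 };
	size_t limit = 4096;            /* e.g. max zone-append payload */

	while (iter.count) {
		size_t n = take_batch(&iter, limit);
		printf("submit bio of %zu bytes at offset %zu\n",
		       n, iter.off - n);
	}
	return 0;
}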
@@ -382,6 +382,14 @@ unsigned int blk_recalc_rq_segments(struct request *rq)
 	switch (bio_op(rq->bio)) {
 	case REQ_OP_DISCARD:
 	case REQ_OP_SECURE_ERASE:
+		if (queue_max_discard_segments(rq->q) > 1) {
+			struct bio *bio = rq->bio;
+
+			for_each_bio(bio)
+				nr_phys_segs++;
+			return nr_phys_segs;
+		}
+		return 1;
 	case REQ_OP_WRITE_ZEROES:
 		return 0;
 	case REQ_OP_WRITE_SAME:
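The blk_recalc_rq_segments() hunk above makes discard and secure-erase requests report one segment per chained bio when the queue supports multiple discard segments, instead of falling through and reporting zero. The counting itself is just a walk over a chained list; a sketch with toy structures rather than the kernel's bio:

#include <stdio.h>

struct toy_bio {
	struct toy_bio *next;   /* like bio->bi_next in a chained request */
};

/* One segment per chained element, as the fix does for discards. */
static unsigned int count_segments(struct toy_bio *head, int max_segments)
{
	unsigned int nr = 0;
	struct toy_bio *b;

	if (max_segments <= 1)
		return 1;               /* single-segment queues merge to one */

	for (b = head; b; b = b->next)  /* for_each_bio() analogue */
		nr++;
	return nr;
}

int main(void)
{
	struct toy_bio c = { 0 }, b = { &c }, a = { &b };

	printf("segments: %u\n", count_segments(&a, 8)); /* 3 */
	printf("segments: %u\n", count_segments(&a, 1)); /* 1 */
	return 0;
}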
@@ -302,7 +302,6 @@ static const char *const rqf_name[] = {
 	RQF_NAME(QUIET),
 	RQF_NAME(ELVPRIV),
 	RQF_NAME(IO_STAT),
-	RQF_NAME(ALLOCED),
 	RQF_NAME(PM),
 	RQF_NAME(HASHED),
 	RQF_NAME(STATS),
@@ -322,6 +322,13 @@ static struct block_device *add_partition(struct gendisk *disk, int partno,
 	const char *dname;
 	int err;
 
+	/*
+	 * disk_max_parts() won't be zero, either GENHD_FL_EXT_DEVT is set
+	 * or 'minors' is passed to alloc_disk().
+	 */
+	if (partno >= disk_max_parts(disk))
+		return ERR_PTR(-EINVAL);
+
 	/*
 	 * Partitions are not supported on zoned block devices that are used as
 	 * such.
@@ -99,13 +99,12 @@ acpi_status acpi_ns_root_initialize(void)
 		 * just create and link the new node(s) here.
 		 */
 		new_node =
-		    ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_namespace_node));
+		    acpi_ns_create_node(*ACPI_CAST_PTR(u32, init_val->name));
 		if (!new_node) {
 			status = AE_NO_MEMORY;
 			goto unlock_and_exit;
 		}
 
-		ACPI_COPY_NAMESEG(new_node->name.ascii, init_val->name);
 		new_node->descriptor_type = ACPI_DESC_TYPE_NAMED;
 		new_node->type = init_val->type;
 
@@ -9,6 +9,8 @@
 #ifndef _ACPI_INTERNAL_H_
 #define _ACPI_INTERNAL_H_
 
+#include <linux/idr.h>
+
 #define PREFIX "ACPI: "
 
 int early_acpi_osi_init(void);
@@ -96,9 +98,11 @@ void acpi_scan_table_handler(u32 event, void *table, void *context);
 
 extern struct list_head acpi_bus_id_list;
 
+#define ACPI_MAX_DEVICE_INSTANCES	4096
+
 struct acpi_device_bus_id {
 	const char *bus_id;
-	unsigned int instance_no;
+	struct ida instance_ida;
 	struct list_head node;
 };
 
@@ -29,6 +29,7 @@
  */
 #ifdef CONFIG_X86
 #include <asm/apic.h>
+#include <asm/cpu.h>
 #endif
 
 #define _COMPONENT              ACPI_PROCESSOR_COMPONENT
@@ -541,6 +542,12 @@ static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
 			wait_for_freeze();
 		} else
 			return -ENODEV;
+
+#if defined(CONFIG_X86) && defined(CONFIG_HOTPLUG_CPU)
+		/* If NMI wants to wake up CPU0, start CPU0. */
+		if (wakeup_cpu0())
+			start_cpu0();
+#endif
 	}
 
 	/* Never reached */
@@ -479,9 +479,8 @@ static void acpi_device_del(struct acpi_device *device)
 	list_for_each_entry(acpi_device_bus_id, &acpi_bus_id_list, node)
 		if (!strcmp(acpi_device_bus_id->bus_id,
 			    acpi_device_hid(device))) {
-			if (acpi_device_bus_id->instance_no > 0)
-				acpi_device_bus_id->instance_no--;
-			else {
+			ida_simple_remove(&acpi_device_bus_id->instance_ida, device->pnp.instance_no);
+			if (ida_is_empty(&acpi_device_bus_id->instance_ida)) {
 				list_del(&acpi_device_bus_id->node);
 				kfree_const(acpi_device_bus_id->bus_id);
 				kfree(acpi_device_bus_id);
@@ -631,6 +630,21 @@ static struct acpi_device_bus_id *acpi_device_bus_id_match(const char *dev_id)
 	return NULL;
 }
 
+static int acpi_device_set_name(struct acpi_device *device,
+				struct acpi_device_bus_id *acpi_device_bus_id)
+{
+	struct ida *instance_ida = &acpi_device_bus_id->instance_ida;
+	int result;
+
+	result = ida_simple_get(instance_ida, 0, ACPI_MAX_DEVICE_INSTANCES, GFP_KERNEL);
+	if (result < 0)
+		return result;
+
+	device->pnp.instance_no = result;
+	dev_set_name(&device->dev, "%s:%02x", acpi_device_bus_id->bus_id, result);
+	return 0;
+}
+
 int acpi_device_add(struct acpi_device *device,
 		    void (*release)(struct device *))
 {
@@ -665,7 +679,9 @@ int acpi_device_add(struct acpi_device *device,
 
 	acpi_device_bus_id = acpi_device_bus_id_match(acpi_device_hid(device));
 	if (acpi_device_bus_id) {
-		acpi_device_bus_id->instance_no++;
+		result = acpi_device_set_name(device, acpi_device_bus_id);
+		if (result)
+			goto err_unlock;
 	} else {
 		acpi_device_bus_id = kzalloc(sizeof(*acpi_device_bus_id),
 					     GFP_KERNEL);
@@ -681,9 +697,16 @@ int acpi_device_add(struct acpi_device *device,
 			goto err_unlock;
 		}
 
+		ida_init(&acpi_device_bus_id->instance_ida);
+
+		result = acpi_device_set_name(device, acpi_device_bus_id);
+		if (result) {
+			kfree(acpi_device_bus_id);
+			goto err_unlock;
+		}
+
 		list_add_tail(&acpi_device_bus_id->node, &acpi_bus_id_list);
 	}
-	dev_set_name(&device->dev, "%s:%02x", acpi_device_bus_id->bus_id, acpi_device_bus_id->instance_no);
 
 	if (device->parent)
 		list_add_tail(&device->node, &device->parent->children);
@@ -1647,6 +1670,8 @@ void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
 	device_initialize(&device->dev);
 	dev_set_uevent_suppress(&device->dev, true);
 	acpi_init_coherency(device);
+	/* Assume there are unmet deps to start with. */
+	device->dep_unmet = 1;
 }
 
 void acpi_device_add_finalize(struct acpi_device *device)
@@ -1910,6 +1935,8 @@ static void acpi_scan_dep_init(struct acpi_device *adev)
 {
 	struct acpi_dep_data *dep;
 
+	adev->dep_unmet = 0;
+
 	mutex_lock(&acpi_dep_list_lock);
 
 	list_for_each_entry(dep, &acpi_dep_list, node) {
@@ -1957,7 +1984,13 @@ static acpi_status acpi_bus_check_add(acpi_handle handle, bool check_dep,
 		return AE_CTRL_DEPTH;
 
 	acpi_scan_init_hotplug(device);
-	if (!check_dep)
+	/*
+	 * If check_dep is true at this point, the device has no dependencies,
+	 * or the creation of the device object would have been postponed above.
+	 */
+	if (check_dep)
+		device->dep_unmet = 0;
+	else
 		acpi_scan_dep_init(device);
 
 out:
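Taken together, the internal.h and acpi_device_add()/acpi_device_del() hunks above replace the per-HID instance_no counter, which could only shrink from the top and so leaked numbers when devices were removed out of order, with a per-HID IDA capped at ACPI_MAX_DEVICE_INSTANCES, so the smallest free instance number is always reused. A compact userspace model of that smallest-free-slot allocation, with a plain bitmap standing in for the kernel's IDA:

#include <stdio.h>
#include <stdbool.h>

#define MAX_INSTANCES 8

static bool used[MAX_INSTANCES];   /* bitmap standing in for struct ida */

/* ida_simple_get() analogue: smallest free id, or -1 when exhausted. */
static int instance_get(void)
{
	for (int i = 0; i < MAX_INSTANCES; i++)
		if (!used[i]) {
			used[i] = true;
			return i;
		}
	return -1;
}

/* ida_simple_remove() analogue: freeing makes the id reusable. */
static void instance_put(int id)
{
	used[id] = false;
}

int main(void)
{
	int a = instance_get();        /* 0 */
	int b = instance_get();        /* 1 */
	int c = instance_get();        /* 2 */

	instance_put(b);               /* middle device goes away */
	printf("reused id: %d\n", instance_get()); /* 1 again, not 3 */
	(void)a; (void)c;
	return 0;
}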
@@ -780,7 +780,7 @@ acpi_status acpi_os_table_override(struct acpi_table_header *existing_table,
 }
 
 /*
- * acpi_table_init()
+ * acpi_locate_initial_tables()
  *
  * find RSDP, find and checksum SDT/XSDT.
  * checksum all tables, print SDT/XSDT
@@ -788,7 +788,7 @@ acpi_status acpi_os_table_override(struct acpi_table_header *existing_table,
  * result: sdt_entry[] is initialized
  */
 
-int __init acpi_table_init(void)
+int __init acpi_locate_initial_tables(void)
 {
 	acpi_status status;
 
@@ -803,9 +803,45 @@ int __init acpi_table_init(void)
 	status = acpi_initialize_tables(initial_tables, ACPI_MAX_TABLES, 0);
 	if (ACPI_FAILURE(status))
 		return -EINVAL;
-	acpi_table_initrd_scan();
 
+	return 0;
+}
+
+void __init acpi_reserve_initial_tables(void)
+{
+	int i;
+
+	for (i = 0; i < ACPI_MAX_TABLES; i++) {
+		struct acpi_table_desc *table_desc = &initial_tables[i];
+		u64 start = table_desc->address;
+		u64 size = table_desc->length;
+
+		if (!start || !size)
+			break;
+
+		pr_info("Reserving %4s table memory at [mem 0x%llx-0x%llx]\n",
+			table_desc->signature.ascii, start, start + size - 1);
+
+		memblock_reserve(start, size);
+	}
+}
+
+void __init acpi_table_init_complete(void)
+{
+	acpi_table_initrd_scan();
 	check_multiple_madt();
+}
+
+int __init acpi_table_init(void)
+{
+	int ret;
+
+	ret = acpi_locate_initial_tables();
+	if (ret)
+		return ret;
+
+	acpi_table_init_complete();
 
 	return 0;
 }
 
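The tables.c hunk above splits the old acpi_table_init() into three phases, so architecture code can locate the tables, reserve their memory regions early, and only later run the initrd override scan and MADT check; the original entry point keeps its behaviour by composing the phases. The shape of that refactoring, reduced to a sketch with hypothetical phase names:

#include <stdio.h>

/* Phase 1: locate resources; may fail. */
static int locate_tables(void)
{
	puts("locate: find RSDP, checksum SDT/XSDT");
	return 0;
}

/* Phase 2: optional, runs between locating and finishing. */
static void reserve_tables(void)
{
	puts("reserve: set aside each table's memory region");
}

/* Phase 3: the remaining late work. */
static void init_complete(void)
{
	puts("complete: initrd table scan, MADT sanity check");
}

/* The old single entry point survives as a composition of the phases,
 * so callers that never needed the reservation window are unaffected. */
static int table_init(void)
{
	int ret = locate_tables();

	if (ret)
		return ret;
	init_complete();
	return 0;
}

int main(void)
{
	/* A caller that needs the window calls the phases individually... */
	if (locate_tables())
		return 1;
	reserve_tables();
	init_complete();

	/* ...while everyone else keeps calling the composed entry point. */
	return table_init();
}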
@@ -147,6 +147,7 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
 		},
 	},
 	{
+	 .callback = video_detect_force_vendor,
 	 .ident = "Sony VPCEH3U1E",
 	 .matches = {
 		DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
@@ -470,12 +470,14 @@ static ssize_t charlcd_write(struct file *file, const char __user *buf,
 	char c;
 
 	for (; count-- > 0; (*ppos)++, tmp++) {
-		if (!in_interrupt() && (((count + 1) & 0x1f) == 0))
+		if (((count + 1) & 0x1f) == 0) {
 			/*
-			 * let's be a little nice with other processes
-			 * that need some CPU
+			 * charlcd_write() is invoked as a VFS->write() callback
+			 * and as such it is always invoked from preemptible
+			 * context and may sleep.
 			 */
-			schedule();
+			cond_resched();
+		}
 
 		if (get_user(c, tmp))
 			return -EFAULT;
@@ -537,12 +539,8 @@ static void charlcd_puts(struct charlcd *lcd, const char *s)
 	int count = strlen(s);
 
 	for (; count-- > 0; tmp++) {
-		if (!in_interrupt() && (((count + 1) & 0x1f) == 0))
-			/*
-			 * let's be a little nice with other processes
-			 * that need some CPU
-			 */
-			schedule();
+		if (((count + 1) & 0x1f) == 0)
+			cond_resched();
 
 		charlcd_write_char(lcd, *tmp);
 	}
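Both charlcd hunks above drop the in_interrupt() check, which is meaningless in a write handler that always runs in preemptible context, and swap the unconditional schedule() for cond_resched(), which yields only when a reschedule is actually pending. A userspace analogue of yielding periodically inside a long loop, with sched_yield() standing in for cond_resched():

#include <sched.h>
#include <stddef.h>
#include <stdio.h>

/* Process a long buffer, politely yielding every 32 items. */
static void process(const char *buf, size_t count)
{
	for (size_t i = 0; i < count; i++) {
		if ((i & 0x1f) == 0x1f)
			sched_yield();  /* cheap hint to the scheduler,
					 * not an unconditional sleep */
		(void)buf[i];           /* ... handle one item ... */
	}
}

int main(void)
{
	static char buf[100000];

	process(buf, sizeof(buf));
	puts("done");
	return 0;
}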
@@ -96,6 +96,9 @@ static void deferred_probe_work_func(struct work_struct *work)
 
 	get_device(dev);
 
+	kfree(dev->p->deferred_probe_reason);
+	dev->p->deferred_probe_reason = NULL;
+
 	/*
 	 * Drop the mutex while probing each device; the probe path may
 	 * manipulate the deferred list
@@ -305,7 +305,7 @@ static int rpm_get_suppliers(struct device *dev)
 	return 0;
 }
 
-static void rpm_put_suppliers(struct device *dev)
+static void __rpm_put_suppliers(struct device *dev, bool try_to_suspend)
 {
 	struct device_link *link;
 
@@ -313,10 +313,30 @@ static void rpm_put_suppliers(struct device *dev)
 			    device_links_read_lock_held()) {
 
 		while (refcount_dec_not_one(&link->rpm_active))
-			pm_runtime_put(link->supplier);
+			pm_runtime_put_noidle(link->supplier);
+
+		if (try_to_suspend)
+			pm_request_idle(link->supplier);
 	}
 }
 
+static void rpm_put_suppliers(struct device *dev)
+{
+	__rpm_put_suppliers(dev, true);
+}
+
+static void rpm_suspend_suppliers(struct device *dev)
+{
+	struct device_link *link;
+	int idx = device_links_read_lock();
+
+	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node,
+				device_links_read_lock_held())
+		pm_request_idle(link->supplier);
+
+	device_links_read_unlock(idx);
+}
+
 /**
  * __rpm_callback - Run a given runtime PM callback for a given device.
  * @cb: Runtime PM callback to run.
@@ -344,8 +364,10 @@ static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
 		idx = device_links_read_lock();
 
 		retval = rpm_get_suppliers(dev);
-		if (retval)
+		if (retval) {
+			rpm_put_suppliers(dev);
 			goto fail;
+		}
 
 		device_links_read_unlock(idx);
 	}
@@ -368,9 +390,9 @@ static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
 		    || (dev->power.runtime_status == RPM_RESUMING && retval))) {
 			idx = device_links_read_lock();
 
-fail:
-			rpm_put_suppliers(dev);
+			__rpm_put_suppliers(dev, false);
 
+fail:
 			device_links_read_unlock(idx);
 		}
 
@@ -642,8 +664,11 @@ static int rpm_suspend(struct device *dev, int rpmflags)
 		goto out;
 	}
 
+	if (dev->power.irq_safe)
+		goto out;
+
 	/* Maybe the parent is now able to suspend. */
-	if (parent && !parent->power.ignore_children && !dev->power.irq_safe) {
+	if (parent && !parent->power.ignore_children) {
 		spin_unlock(&dev->power.lock);
 
 		spin_lock(&parent->power.lock);
@@ -652,6 +677,14 @@ static int rpm_suspend(struct device *dev, int rpmflags)
 
 		spin_lock(&dev->power.lock);
 	}
+	/* Maybe the suppliers are now able to suspend. */
+	if (dev->power.links_count > 0) {
+		spin_unlock_irq(&dev->power.lock);
+
+		rpm_suspend_suppliers(dev);
+
+		spin_lock_irq(&dev->power.lock);
+	}
 
  out:
 	trace_rpm_return_int_rcuidle(dev, _THIS_IP_, retval);
@@ -1657,8 +1690,8 @@ void pm_runtime_get_suppliers(struct device *dev)
 				device_links_read_lock_held())
 		if (link->flags & DL_FLAG_PM_RUNTIME) {
 			link->supplier_preactivated = true;
-			refcount_inc(&link->rpm_active);
 			pm_runtime_get_sync(link->supplier);
+			refcount_inc(&link->rpm_active);
 		}
 
 	device_links_read_unlock(idx);
@@ -1671,6 +1704,8 @@ void pm_runtime_get_suppliers(struct device *dev)
 void pm_runtime_put_suppliers(struct device *dev)
 {
 	struct device_link *link;
+	unsigned long flags;
+	bool put;
 	int idx;
 
 	idx = device_links_read_lock();
@@ -1679,7 +1714,11 @@ void pm_runtime_put_suppliers(struct device *dev)
 				device_links_read_lock_held())
 		if (link->supplier_preactivated) {
 			link->supplier_preactivated = false;
-			if (refcount_dec_not_one(&link->rpm_active))
+			spin_lock_irqsave(&dev->power.lock, flags);
+			put = pm_runtime_status_suspended(dev) &&
+			      refcount_dec_not_one(&link->rpm_active);
+			spin_unlock_irqrestore(&dev->power.lock, flags);
+			if (put)
 				pm_runtime_put(link->supplier);
 		}
 
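The runtime-PM hunks above separate "drop every extra rpm_active reference" from "try to suspend the supplier": the drain loop now uses pm_runtime_put_noidle(), and at most one pm_request_idle() is issued afterwards, instead of triggering an idle attempt per reference. The dec-not-one drain loop in isolation, with C11 atomics standing in for the kernel's refcount_t:

#include <stdatomic.h>
#include <stdio.h>

/* refcount_dec_not_one() analogue: decrement unless the count is 1. */
static int dec_not_one(atomic_int *c)
{
	int v = atomic_load(c);

	while (v > 1) {
		/* on failure the CAS reloads v, so the loop retries safely */
		if (atomic_compare_exchange_weak(c, &v, v - 1))
			return 1;
	}
	return 0;
}

int main(void)
{
	atomic_int rpm_active = 4;   /* 1 baseline + 3 extra references */
	int puts_issued = 0;

	/* Drain the extras without poking the supplier each time... */
	while (dec_not_one(&rpm_active))
		puts_issued++;       /* pm_runtime_put_noidle() analogue */

	/* ...then issue a single idle request. */
	printf("puts=%d, final=%d, then one pm_request_idle()\n",
	       puts_issued, atomic_load(&rpm_active)); /* puts=3, final=1 */
	return 0;
}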
@@ -1369,10 +1369,13 @@ static blk_status_t null_handle_cmd(struct nullb_cmd *cmd, sector_t sector,
 	}
 
 	if (dev->zoned)
-		cmd->error = null_process_zoned_cmd(cmd, op,
-						    sector, nr_sectors);
+		sts = null_process_zoned_cmd(cmd, op, sector, nr_sectors);
 	else
-		cmd->error = null_process_cmd(cmd, op, sector, nr_sectors);
+		sts = null_process_cmd(cmd, op, sector, nr_sectors);
+
+	/* Do not overwrite errors (e.g. timeout errors) */
+	if (cmd->error == BLK_STS_OK)
+		cmd->error = sts;
 
 out:
 	nullb_complete_cmd(cmd);
@@ -1451,8 +1454,20 @@ static bool should_requeue_request(struct request *rq)
 
 static enum blk_eh_timer_return null_timeout_rq(struct request *rq, bool res)
 {
+	struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq);
+
 	pr_info("rq %p timed out\n", rq);
-	blk_mq_complete_request(rq);
+
+	/*
+	 * If the device is marked as blocking (i.e. memory backed or zoned
+	 * device), the submission path may be blocked waiting for resources
+	 * and cause real timeouts. For these real timeouts, the submission
+	 * path will complete the request using blk_mq_complete_request().
+	 * Only fake timeouts need to execute blk_mq_complete_request() here.
+	 */
+	cmd->error = BLK_STS_TIMEOUT;
+	if (cmd->fake_timeout)
+		blk_mq_complete_request(rq);
 	return BLK_EH_DONE;
 }
 
@@ -1473,6 +1488,7 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
 	cmd->rq = bd->rq;
 	cmd->error = BLK_STS_OK;
 	cmd->nq = nq;
+	cmd->fake_timeout = should_timeout_request(bd->rq);
 
 	blk_mq_start_request(bd->rq);
 
@@ -1489,7 +1505,7 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
 			return BLK_STS_OK;
 		}
 	}
-	if (should_timeout_request(bd->rq))
+	if (cmd->fake_timeout)
 		return BLK_STS_OK;
 
 	return null_handle_cmd(cmd, sector, nr_sectors, req_op(bd->rq));
@@ -22,6 +22,7 @@ struct nullb_cmd {
 	blk_status_t error;
 	struct nullb_queue *nq;
 	struct hrtimer timer;
+	bool fake_timeout;
 };
 
 struct nullb_queue {
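The null_blk hunks above record at submission time whether a timeout is being injected (cmd->fake_timeout) and let the timeout handler complete only those requests; a genuinely stuck request is still completed by its blocked submission path, and its error is no longer overwritten. A sketch of the record-at-submit, check-at-timeout pattern, with a toy request type and hypothetical field names:

#include <stdbool.h>
#include <stdio.h>

struct toy_req {
	int  error;          /* 0 == OK */
	bool fake_timeout;   /* decided exactly once, at submission */
	bool completed;
};

static void submit(struct toy_req *rq, bool inject_timeout)
{
	rq->error = 0;
	rq->fake_timeout = inject_timeout;
}

static void timeout_handler(struct toy_req *rq)
{
	rq->error = -62;            /* ETIME, a BLK_STS_TIMEOUT analogue */
	if (rq->fake_timeout)       /* only injected timeouts complete here */
		rq->completed = true;
	/* real timeouts: the blocked submission path completes rq itself */
}

int main(void)
{
	struct toy_req injected = {0}, real = {0};

	submit(&injected, true);
	submit(&real, false);
	timeout_handler(&injected);
	timeout_handler(&real);
	printf("injected completed=%d, real completed=%d\n",
	       injected.completed, real.completed); /* 1, 0 */
	return 0;
}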
@@ -891,7 +891,7 @@ next:
 out:
 	for (i = last_map; i < num; i++) {
 		/* Don't zap current batch's valid persistent grants. */
-		if(i >= last_map + segs_to_map)
+		if(i >= map_until)
 			pages[i]->persistent_gnt = NULL;
 		pages[i]->handle = BLKBACK_INVALID_HANDLE;
 	}
@@ -285,7 +285,7 @@ static int omap_l3_probe(struct platform_device *pdev)
 	 */
 	l3->debug_irq = platform_get_irq(pdev, 0);
 	ret = devm_request_irq(l3->dev, l3->debug_irq, l3_interrupt_handler,
-			       0x0, "l3-dbg-irq", l3);
+			       IRQF_NO_THREAD, "l3-dbg-irq", l3);
 	if (ret) {
 		dev_err(l3->dev, "request_irq failed for %d\n",
 			l3->debug_irq);
@@ -294,7 +294,7 @@ static int omap_l3_probe(struct platform_device *pdev)
 
 	l3->app_irq = platform_get_irq(pdev, 1);
 	ret = devm_request_irq(l3->dev, l3->app_irq, l3_interrupt_handler,
-			       0x0, "l3-app-irq", l3);
+			       IRQF_NO_THREAD, "l3-app-irq", l3);
 	if (ret)
 		dev_err(l3->dev, "request_irq failed for %d\n", l3->app_irq);
 
@@ -3053,7 +3053,9 @@ static int sysc_remove(struct platform_device *pdev)
 
 	pm_runtime_put_sync(&pdev->dev);
 	pm_runtime_disable(&pdev->dev);
-	reset_control_assert(ddata->rsts);
+
+	if (!reset_control_status(ddata->rsts))
+		reset_control_assert(ddata->rsts);
 
 unprepare:
 	sysc_unprepare(ddata);
@@ -730,7 +730,8 @@ static int clk_gfx3d_determine_rate(struct clk_hw *hw,
 	struct clk_rate_request parent_req = { };
 	struct clk_rcg2_gfx3d *cgfx = to_clk_rcg2_gfx3d(hw);
 	struct clk_hw *xo, *p0, *p1, *p2;
-	unsigned long request, p0_rate;
+	unsigned long p0_rate;
+	u8 mux_div = cgfx->div;
 	int ret;
 
 	p0 = cgfx->hws[0];
@@ -750,14 +751,15 @@ static int clk_gfx3d_determine_rate(struct clk_hw *hw,
 		return 0;
 	}
 
-	request = req->rate;
-	if (cgfx->div > 1)
-		parent_req.rate = request = request * cgfx->div;
+	if (mux_div == 0)
+		mux_div = 1;
+
+	parent_req.rate = req->rate * mux_div;
 
 	/* This has to be a fixed rate PLL */
 	p0_rate = clk_hw_get_rate(p0);
 
-	if (request == p0_rate) {
+	if (parent_req.rate == p0_rate) {
 		req->rate = req->best_parent_rate = p0_rate;
 		req->best_parent_hw = p0;
 		return 0;
@@ -765,7 +767,7 @@ static int clk_gfx3d_determine_rate(struct clk_hw *hw,
 
 	if (req->best_parent_hw == p0) {
 		/* Are we going back to a previously used rate? */
-		if (clk_hw_get_rate(p2) == request)
+		if (clk_hw_get_rate(p2) == parent_req.rate)
 			req->best_parent_hw = p2;
 		else
 			req->best_parent_hw = p1;
@@ -780,8 +782,7 @@ static int clk_gfx3d_determine_rate(struct clk_hw *hw,
 		return ret;
 
 	req->rate = req->best_parent_rate = parent_req.rate;
-	if (cgfx->div > 1)
-		req->rate /= cgfx->div;
+	req->rate /= mux_div;
 
 	return 0;
 }
@@ -510,9 +510,12 @@ static const struct clk_rpmh_desc clk_rpmh_sm8350 = {
 	.num_clks = ARRAY_SIZE(sm8350_rpmh_clocks),
 };
 
+/* Resource name must match resource id present in cmd-db */
+DEFINE_CLK_RPMH_ARC(sc7280, bi_tcxo, bi_tcxo_ao, "xo.lvl", 0x3, 4);
+
 static struct clk_hw *sc7280_rpmh_clocks[] = {
-	[RPMH_CXO_CLK]      = &sdm845_bi_tcxo.hw,
-	[RPMH_CXO_CLK_A]    = &sdm845_bi_tcxo_ao.hw,
+	[RPMH_CXO_CLK]      = &sc7280_bi_tcxo.hw,
+	[RPMH_CXO_CLK_A]    = &sc7280_bi_tcxo_ao.hw,
 	[RPMH_LN_BB_CLK2]   = &sdm845_ln_bb_clk2.hw,
 	[RPMH_LN_BB_CLK2_A] = &sdm845_ln_bb_clk2_ao.hw,
 	[RPMH_RF_CLK1]      = &sdm845_rf_clk1.hw,
@@ -620,7 +620,7 @@ static struct clk_rcg2 gcc_sdcc1_apps_clk_src = {
 		.name = "gcc_sdcc1_apps_clk_src",
 		.parent_data = gcc_parent_data_1,
 		.num_parents = 5,
-		.ops = &clk_rcg2_ops,
+		.ops = &clk_rcg2_floor_ops,
 	},
 };
 
@@ -642,7 +642,7 @@ static struct clk_rcg2 gcc_sdcc1_ice_core_clk_src = {
 		.name = "gcc_sdcc1_ice_core_clk_src",
 		.parent_data = gcc_parent_data_0,
 		.num_parents = 4,
-		.ops = &clk_rcg2_floor_ops,
+		.ops = &clk_rcg2_ops,
 	},
 };
 
@@ -267,7 +267,7 @@ struct freq_attr cpufreq_freq_attr_##_name##_freqs = \
 __ATTR_RO(_name##_frequencies)
 
 /*
- * show_scaling_available_frequencies - show available normal frequencies for
+ * scaling_available_frequencies_show - show available normal frequencies for
  * the specified CPU
  */
 static ssize_t scaling_available_frequencies_show(struct cpufreq_policy *policy,
@@ -279,7 +279,7 @@ cpufreq_attr_available_freq(scaling_available);
 EXPORT_SYMBOL_GPL(cpufreq_freq_attr_scaling_available_freqs);
 
 /*
- * show_available_boost_freqs - show available boost frequencies for
+ * scaling_boost_frequencies_show - show available boost frequencies for
  * the specified CPU
  */
 static ssize_t scaling_boost_frequencies_show(struct cpufreq_policy *policy,
@@ -1241,6 +1241,7 @@ int extcon_dev_register(struct extcon_dev *edev)
 			       sizeof(*edev->nh), GFP_KERNEL);
 	if (!edev->nh) {
 		ret = -ENOMEM;
+		device_unregister(&edev->dev);
 		goto err_dev;
 	}
 
@@ -346,6 +346,7 @@ nosy_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 	struct client *client = file->private_data;
 	spinlock_t *client_list_lock = &client->lynx->client_list_lock;
 	struct nosy_stats stats;
+	int ret;
 
 	switch (cmd) {
 	case NOSY_IOC_GET_STATS:
@@ -360,11 +361,15 @@ nosy_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 		return 0;
 
 	case NOSY_IOC_START:
+		ret = -EBUSY;
 		spin_lock_irq(client_list_lock);
-		list_add_tail(&client->link, &client->lynx->client_list);
+		if (list_empty(&client->link)) {
+			list_add_tail(&client->link, &client->lynx->client_list);
+			ret = 0;
+		}
 		spin_unlock_irq(client_list_lock);
 
-		return 0;
+		return ret;
 
 	case NOSY_IOC_STOP:
 		spin_lock_irq(client_list_lock);
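The nosy hunks above make a second NOSY_IOC_START return -EBUSY instead of double-linking the client into the list: the entry is added only if its node is not already on a list. The guard reduces to "is this node detached?" before insertion; a sketch with a toy circular list, where a self-linked node mirrors the kernel's list_empty() idiom on an initialized list_head:

#include <stdio.h>

struct node {
	struct node *next, *prev;
};

/* A detached node points at itself, as after INIT_LIST_HEAD(). */
static void node_init(struct node *n)
{
	n->next = n->prev = n;
}

static int node_is_detached(const struct node *n) /* list_empty() analogue */
{
	return n->next == n;
}

/* Link n before head; returns -16 (an EBUSY analogue) if already linked. */
static int start(struct node *head, struct node *n)
{
	if (!node_is_detached(n))
		return -16;
	n->next = head;
	n->prev = head->prev;
	head->prev->next = n;
	head->prev = n;
	return 0;
}

int main(void)
{
	struct node head, client;

	node_init(&head);
	node_init(&client);
	printf("first start: %d\n", start(&head, &client));  /* 0 */
	printf("second start: %d\n", start(&head, &client)); /* -16 */
	return 0;
}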