Linux 4.12-rc5
-----BEGIN PGP SIGNATURE-----
iQEcBAABAgAGBQJZPdbLAAoJEHm+PkMAQRiGx4wH/1nCjfnl6fE8oJ24/1gEAOUh
biFdqJkYZmlLYHVtYfLm4Ueg4adJdg0wx6qM/4RaAzmQVvLfDV34bc1qBf1+P95G
kVF+osWyXrZo5cTwkwapHW/KNu4VJwAx2D1wrlxKDVG5AOrULH1pYOYGOpApEkZU
4N+q5+M0ce0GJpqtUZX+UnI33ygjdDbBxXoFKsr24B7eA0ouGbAJ7dC88WcaETL+
2/7tT01SvDMo0jBSV0WIqlgXwZ5gp3yPGnklC3F4159Yze6VFrzHMKS/UpPF8o8E
W9EbuzwxsKyXUifX2GY348L1f+47glen/1sedbuKnFhP6E9aqUQQJXvEO7ueQl4=
=m2Gx
-----END PGP SIGNATURE-----

Merge tag 'v4.12-rc5' into for-4.13/block

We've already got a few conflicts, and upcoming work depends on some of the
changes that have gone into mainline as regression fixes for this series.

Pull in 4.12-rc5 to resolve these conflicts and make it easier on downstream
trees to continue working on 4.13 changes.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 8f66439eec
@@ -866,6 +866,15 @@
 	dscc4.setup=	[NET]
 
+	dt_cpu_ftrs=	[PPC]
+			Format: {"off" | "known"}
+			Control how the dt_cpu_ftrs device-tree binding is
+			used for CPU feature discovery and setup (if it
+			exists).
+			off: Do not use it, fall back to legacy cpu table.
+			known: Do not pass through unknown features to guests
+			or userspace, only those that the kernel is aware of.
+
 	dump_apple_properties	[X86]
 			Dump name and content of EFI device properties on
 			x86 Macs. Useful for driver authors to determine
@@ -36,7 +36,7 @@ Optional properties:
   control gpios
 
 - threshold:	allows setting the "click"-threshold in the range
-		from 20 to 80.
+		from 0 to 80.
 
 - gain:		allows setting the sensitivity in the range from 0 to
 		31. Note that lower values indicate higher
@@ -26,6 +26,10 @@ Optional properties:
 - interrupt-controller : Indicates the switch is itself an interrupt
 			 controller. This is used for the PHY interrupts.
 #interrupt-cells = <2> : Controller uses two cells, number and flag
+- eeprom-length	       : Set to the length of an EEPROM connected to the
+			 switch. Must be set if the switch can not detect
+			 the presence and/or size of a connected EEPROM,
+			 otherwise optional.
 - mdio			: Container of PHY and devices on the switches MDIO
 			  bus.
 - mdio?			: Container of PHYs and devices on the external MDIO
@@ -15,6 +15,10 @@ Optional properties:
 - phy-reset-active-high : If present then the reset sequence using the GPIO
   specified in the "phy-reset-gpios" property is reversed (H=reset state,
   L=operation state).
+- phy-reset-post-delay : Post reset delay in milliseconds. If present then
+  a delay of phy-reset-post-delay milliseconds will be observed after the
+  phy-reset-gpios has been toggled. Can be omitted thus no delay is
+  observed. Delay is in range of 1ms to 1000ms. Other delays are invalid.
 - phy-supply : regulator that powers the Ethernet PHY.
 - phy-handle : phandle to the PHY device connected to this device.
 - fixed-link : Assume a fixed link. See fixed-link.txt in the same directory.
@@ -247,7 +247,6 @@ bias-bus-hold		- latch weakly
 bias-pull-up		- pull up the pin
 bias-pull-down		- pull down the pin
 bias-pull-pin-default	- use pin-default pull state
-bi-directional		- pin supports simultaneous input/output operations
 drive-push-pull		- drive actively high and low
 drive-open-drain	- drive with open drain
 drive-open-source	- drive with open source
@@ -260,7 +259,6 @@ input-debounce		- debounce mode with debound time X
 power-source		- select between different power supplies
 low-power-enable	- enable low power mode
 low-power-disable	- disable low power mode
-output-enable		- enable output on pin regardless of output value
 output-low		- set the pin to output mode with low level
 output-high		- set the pin to output mode with high level
 slew-rate		- set the slew rate
@@ -10,6 +10,7 @@ Required properties:
 - "rockchip,rk3288-usb", "rockchip,rk3066-usb", "snps,dwc2": for rk3288 Soc;
 - "lantiq,arx100-usb": The DWC2 USB controller instance in Lantiq ARX SoCs;
 - "lantiq,xrx200-usb": The DWC2 USB controller instance in Lantiq XRX SoCs;
+- "amlogic,meson8-usb": The DWC2 USB controller instance in Amlogic Meson8 SoCs;
 - "amlogic,meson8b-usb": The DWC2 USB controller instance in Amlogic Meson8b SoCs;
 - "amlogic,meson-gxbb-usb": The DWC2 USB controller instance in Amlogic S905 SoCs;
 - "amcc,dwc-otg": The DWC2 USB controller instance in AMCC Canyonlands 460EX SoCs;
@@ -15,7 +15,7 @@ It has been tested with the following devices:
 The driver allows configuration of the touch screen via a set of sysfs files:
 
 /sys/class/input/eventX/device/device/threshold:
-	allows setting the "click"-threshold in the range from 20 to 80.
+	allows setting the "click"-threshold in the range from 0 to 80.
 
 /sys/class/input/eventX/device/device/gain:
 	allows setting the sensitivity in the range from 0 to 31. Note that
 Documentation/networking/dpaa.txt | 194 (new file)
@@ -0,0 +1,194 @@
+The QorIQ DPAA Ethernet Driver
+==============================
+
+Authors:
+Madalin Bucur <madalin.bucur@nxp.com>
+Camelia Groza <camelia.groza@nxp.com>
+
+Contents
+========
+
+	- DPAA Ethernet Overview
+	- DPAA Ethernet Supported SoCs
+	- Configuring DPAA Ethernet in your kernel
+	- DPAA Ethernet Frame Processing
+	- DPAA Ethernet Features
+	- Debugging
+
+DPAA Ethernet Overview
+======================
+
+DPAA stands for Data Path Acceleration Architecture and it is a
+set of networking acceleration IPs that are available on several
+generations of SoCs, both on PowerPC and ARM64.
+
+The Freescale DPAA architecture consists of a series of hardware blocks
+that support Ethernet connectivity. The Ethernet driver depends upon the
+following drivers in the Linux kernel:
+
+ - Peripheral Access Memory Unit (PAMU) (* needed only for PPC platforms)
+    drivers/iommu/fsl_*
+ - Frame Manager (FMan)
+    drivers/net/ethernet/freescale/fman
+ - Queue Manager (QMan), Buffer Manager (BMan)
+    drivers/soc/fsl/qbman
+
+A simplified view of the dpaa_eth interfaces mapped to FMan MACs:
+
+  dpaa_eth       /eth0\     ...       /ethN\
+  driver        |     |             |     |
+  -------------   ----   -----------   ----   -------------
+       -Ports  / Tx  Rx \    ...    / Tx  Rx \
+  FMan        |          |         |          |
+       -MACs  |   MAC0   |         |   MACN   |
+             /   dtsec0   \  ...  /   dtsecN   \ (or tgec)
+            /              \     /              \(or memac)
+  ---------  --------------  ---  --------------  ---------
+      FMan, FMan Port, FMan SP, FMan MURAM drivers
+  ---------------------------------------------------------
+      FMan HW blocks: MURAM, MACs, Ports, SP
+  ---------------------------------------------------------
+
+The dpaa_eth relation to the QMan, BMan and FMan:
+              ________________________________
+  dpaa_eth   /            eth0                \
+  driver    /                                  \
+  ---------   -^-   -^-   -^-   ---    ---------
+  QMan driver / \   / \   / \  \   /  | BMan    |
+             |Rx | |Rx | |Tx | |Tx |  | driver  |
+  ---------  |Dfl| |Err| |Cnf| |FQs|  |         |
+  QMan HW    |FQ | |FQ | |FQs| |   |  |         |
+             /   \ /   \ /   \  \ /   |         |
+  ---------   ---   ---   ---   -v-    ---------
+            |        FMan QMI         |
+            | FMan HW       FMan BMI  | BMan HW |
+              -----------------------   --------
+
+where the acronyms used above (and in the code) are:
+DPAA = Data Path Acceleration Architecture
+FMan = DPAA Frame Manager
+QMan = DPAA Queue Manager
+BMan = DPAA Buffers Manager
+QMI = QMan interface in FMan
+BMI = BMan interface in FMan
+FMan SP = FMan Storage Profiles
+MURAM = Multi-user RAM in FMan
+FQ = QMan Frame Queue
+Rx Dfl FQ = default reception FQ
+Rx Err FQ = Rx error frames FQ
+Tx Cnf FQ = Tx confirmation FQs
+Tx FQs = transmission frame queues
+dtsec = datapath three speed Ethernet controller (10/100/1000 Mbps)
+tgec = ten gigabit Ethernet controller (10 Gbps)
+memac = multirate Ethernet MAC (10/100/1000/10000)
+
+DPAA Ethernet Supported SoCs
+============================
+
+The DPAA drivers enable the Ethernet controllers present on the following SoCs:
+
+# PPC
+P1023
+P2041
+P3041
+P4080
+P5020
+P5040
+T1023
+T1024
+T1040
+T1042
+T2080
+T4240
+B4860
+
+# ARM
+LS1043A
+LS1046A
+
+Configuring DPAA Ethernet in your kernel
+========================================
+
+To enable the DPAA Ethernet driver, the following Kconfig options are required:
+
+# common for arch/arm64 and arch/powerpc platforms
+CONFIG_FSL_DPAA=y
+CONFIG_FSL_FMAN=y
+CONFIG_FSL_DPAA_ETH=y
+CONFIG_FSL_XGMAC_MDIO=y
+
+# for arch/powerpc only
+CONFIG_FSL_PAMU=y
+
+# common options needed for the PHYs used on the RDBs
+CONFIG_VITESSE_PHY=y
+CONFIG_REALTEK_PHY=y
+CONFIG_AQUANTIA_PHY=y
+
+DPAA Ethernet Frame Processing
+==============================
+
+On Rx, buffers for the incoming frames are retrieved from one of the three
+existing buffers pools. The driver initializes and seeds these, each with
+buffers of different sizes: 1KB, 2KB and 4KB.
+
+On Tx, all transmitted frames are returned to the driver through Tx
+confirmation frame queues. The driver is then responsible for freeing the
+buffers. In order to do this properly, a backpointer is added to the buffer
+before transmission that points to the skb. When the buffer returns to the
+driver on a confirmation FQ, the skb can be correctly consumed.
+
+DPAA Ethernet Features
+======================
+
+Currently the DPAA Ethernet driver enables the basic features required for
+a Linux Ethernet driver. The support for advanced features will be added
+gradually.
+
+The driver has Rx and Tx checksum offloading for UDP and TCP. Currently the Rx
+checksum offload feature is enabled by default and cannot be controlled through
+ethtool.
+
+The driver has support for multiple prioritized Tx traffic classes. Priorities
+range from 0 (lowest) to 3 (highest). These are mapped to HW workqueues with
+strict priority levels. Each traffic class contains NR_CPU TX queues. By
+default, only one traffic class is enabled and the lowest priority Tx queues
+are used. Higher priority traffic classes can be enabled with the mqprio
+qdisc. For example, all four traffic classes are enabled on an interface with
+the following command. Furthermore, skb priority levels are mapped to traffic
+classes as follows:
+
+	* priorities 0 to 3 - traffic class 0 (low priority)
+	* priorities 4 to 7 - traffic class 1 (medium-low priority)
+	* priorities 8 to 11 - traffic class 2 (medium-high priority)
+	* priorities 12 to 15 - traffic class 3 (high priority)
+
+tc qdisc add dev <int> root handle 1: \
+	 mqprio num_tc 4 map 0 0 0 0 1 1 1 1 2 2 2 2 3 3 3 3 hw 1
+
+Debugging
+=========
+
+The following statistics are exported for each interface through ethtool:
+
+	- interrupt count per CPU
+	- Rx packets count per CPU
+	- Tx packets count per CPU
+	- Tx confirmed packets count per CPU
+	- Tx S/G frames count per CPU
+	- Tx error count per CPU
+	- Rx error count per CPU
+	- Rx error count per type
+	- congestion related statistics:
+		- congestion status
+		- time spent in congestion
+		- number of time the device entered congestion
+		- dropped packets count per cause
+
+The driver also exports the following information in sysfs:
+
+	- the FQ IDs for each FQ type
+	/sys/devices/platform/dpaa-ethernet.0/net/<int>/fqids
+
+	- the IDs of the buffer pools in use
+	/sys/devices/platform/dpaa-ethernet.0/net/<int>/bpids
@@ -1,7 +1,7 @@
 TCP protocol
 ============
 
-Last updated: 9 February 2008
+Last updated: 3 June 2017
 
 Contents
 ========
@@ -29,18 +29,19 @@ As of 2.6.13, Linux supports pluggable congestion control algorithms.
 A congestion control mechanism can be registered through functions in
 tcp_cong.c. The functions used by the congestion control mechanism are
 registered via passing a tcp_congestion_ops struct to
-tcp_register_congestion_control. As a minimum name, ssthresh,
-cong_avoid must be valid.
+tcp_register_congestion_control. As a minimum, the congestion control
+mechanism must provide a valid name and must implement either ssthresh,
+cong_avoid and undo_cwnd hooks or the "omnipotent" cong_control hook.
 
 Private data for a congestion control mechanism is stored in tp->ca_priv.
 tcp_ca(tp) returns a pointer to this space. This is preallocated space - it
 is important to check the size of your private data will fit this space, or
-alternatively space could be allocated elsewhere and a pointer to it could
+alternatively, space could be allocated elsewhere and a pointer to it could
 be stored here.
 
 There are three kinds of congestion control algorithms currently: The
 simplest ones are derived from TCP reno (highspeed, scalable) and just
-provide an alternative the congestion window calculation. More complex
+provide an alternative congestion window calculation. More complex
 ones like BIC try to look at other events to provide better
 heuristics. There are also round trip time based algorithms like
 Vegas and Westwood+.
@@ -49,21 +50,15 @@ Good TCP congestion control is a complex problem because the algorithm
 needs to maintain fairness and performance. Please review current
 research and RFC's before developing new modules.
 
-The method that is used to determine which congestion control mechanism is
-determined by the setting of the sysctl net.ipv4.tcp_congestion_control.
-The default congestion control will be the last one registered (LIFO);
-so if you built everything as modules, the default will be reno. If you
-build with the defaults from Kconfig, then CUBIC will be builtin (not a
-module) and it will end up the default.
-
-If you really want a particular default value then you will need
-to set it with the sysctl. If you use a sysctl, the module will be autoloaded
-if needed and you will get the expected protocol. If you ask for an
-unknown congestion method, then the sysctl attempt will fail.
-
-If you remove a tcp congestion control module, then you will get the next
+The default congestion control mechanism is chosen based on the
+DEFAULT_TCP_CONG Kconfig parameter. If you really want a particular default
+value then you can set it using sysctl net.ipv4.tcp_congestion_control. The
+module will be autoloaded if needed and you will get the expected protocol. If
+you ask for an unknown congestion method, then the sysctl attempt will fail.
+
+If you remove a TCP congestion control module, then you will get the next
 available one. Since reno cannot be built as a module, and cannot be
-deleted, it will always be available.
+removed, it will always be available.
 
 How the new TCP output machine [nyi] works.
 ===========================================
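As a hedged illustration of the registration interface described in the tcp.txt
hunks above (the sketch is not part of this merge, and the algorithm name
"example" is made up), a minimal congestion control module supplies a name plus
the ssthresh, cong_avoid and undo_cwnd hooks; here two of the hooks simply
reuse the exported reno helpers:

	#include <linux/module.h>
	#include <net/tcp.h>

	/* Halve the congestion window on loss, with a floor of 2 segments. */
	static u32 example_ssthresh(struct sock *sk)
	{
		const struct tcp_sock *tp = tcp_sk(sk);

		return max(tp->snd_cwnd >> 1U, 2U);
	}

	static struct tcp_congestion_ops tcp_example __read_mostly = {
		.name		= "example",
		.owner		= THIS_MODULE,
		.ssthresh	= example_ssthresh,
		.cong_avoid	= tcp_reno_cong_avoid,	/* standard reno growth */
		.undo_cwnd	= tcp_reno_undo_cwnd,	/* restore prior cwnd */
	};

	static int __init tcp_example_register(void)
	{
		return tcp_register_congestion_control(&tcp_example);
	}

	static void __exit tcp_example_unregister(void)
	{
		tcp_unregister_congestion_control(&tcp_example);
	}

	module_init(tcp_example_register);
	module_exit(tcp_example_unregister);
	MODULE_LICENSE("GPL");

Once such a module is loaded, it can be selected through the
net.ipv4.tcp_congestion_control sysctl mentioned in the documentation above.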
 MAINTAINERS | 35
@@ -1172,7 +1172,7 @@ N:	clps711x
 
 ARM/CIRRUS LOGIC EP93XX ARM ARCHITECTURE
 M:	Hartley Sweeten <hsweeten@visionengravers.com>
-M:	Ryan Mallon <rmallon@gmail.com>
+M:	Alexander Sverdlin <alexander.sverdlin@gmail.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
 F:	arch/arm/mach-ep93xx/
@@ -1489,13 +1489,15 @@ M:	Gregory Clement <gregory.clement@free-electrons.com>
 M:	Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
-F:	arch/arm/mach-mvebu/
-F:	drivers/rtc/rtc-armada38x.c
 F:	arch/arm/boot/dts/armada*
 F:	arch/arm/boot/dts/kirkwood*
+F:	arch/arm/configs/mvebu_*_defconfig
+F:	arch/arm/mach-mvebu/
 F:	arch/arm64/boot/dts/marvell/armada*
 F:	drivers/cpufreq/mvebu-cpufreq.c
-F:	arch/arm/configs/mvebu_*_defconfig
+F:	drivers/irqchip/irq-armada-370-xp.c
+F:	drivers/irqchip/irq-mvebu-*
+F:	drivers/rtc/rtc-armada38x.c
 
 ARM/Marvell Berlin SoC support
 M:	Jisheng Zhang <jszhang@marvell.com>
@@ -1721,7 +1723,6 @@ N:	rockchip
 ARM/SAMSUNG EXYNOS ARM ARCHITECTURES
 M:	Kukjin Kim <kgene@kernel.org>
 M:	Krzysztof Kozlowski <krzk@kernel.org>
-R:	Javier Martinez Canillas <javier@osg.samsung.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:	linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
 Q:	https://patchwork.kernel.org/project/linux-samsung-soc/list/
@@ -1829,7 +1830,6 @@ F:	drivers/edac/altera_edac.
 ARM/STI ARCHITECTURE
 M:	Patrice Chotard <patrice.chotard@st.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-L:	kernel@stlinux.com
 W:	http://www.stlinux.com
 S:	Maintained
 F:	arch/arm/mach-sti/
@@ -5622,7 +5622,7 @@ F:	scripts/get_maintainer.pl
 
 GENWQE (IBM Generic Workqueue Card)
 M:	Frank Haverkamp <haver@linux.vnet.ibm.com>
-M:	Gabriel Krisman Bertazi <krisman@linux.vnet.ibm.com>
+M:	Guilherme G. Piccoli <gpiccoli@linux.vnet.ibm.com>
 S:	Supported
 F:	drivers/misc/genwqe/
 
@@ -5667,7 +5667,6 @@ F:	tools/testing/selftests/gpio/
 
 GPIO SUBSYSTEM
 M:	Linus Walleij <linus.walleij@linaro.org>
-M:	Alexandre Courbot <gnurou@gmail.com>
 L:	linux-gpio@vger.kernel.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-gpio.git
 S:	Maintained
@@ -7143,7 +7142,7 @@ S:	Maintained
 F:	drivers/media/platform/rcar_jpu.c
 
 JSM Neo PCI based serial card
-M:	Gabriel Krisman Bertazi <krisman@linux.vnet.ibm.com>
+M:	Guilherme G. Piccoli <gpiccoli@linux.vnet.ibm.com>
 L:	linux-serial@vger.kernel.org
 S:	Maintained
 F:	drivers/tty/serial/jsm/
@@ -7707,7 +7706,7 @@ F:	drivers/platform/x86/hp_accel.c
 
 LIVE PATCHING
 M:	Josh Poimboeuf <jpoimboe@redhat.com>
-M:	Jessica Yu <jeyu@redhat.com>
+M:	Jessica Yu <jeyu@kernel.org>
 M:	Jiri Kosina <jikos@kernel.org>
 M:	Miroslav Benes <mbenes@suse.cz>
 R:	Petr Mladek <pmladek@suse.com>
@@ -8508,7 +8507,7 @@ S:	Odd Fixes
 F:	drivers/media/radio/radio-miropcm20*
 
 MELLANOX MLX4 core VPI driver
-M:	Yishai Hadas <yishaih@mellanox.com>
+M:	Tariq Toukan <tariqt@mellanox.com>
 L:	netdev@vger.kernel.org
 L:	linux-rdma@vger.kernel.org
 W:	http://www.mellanox.com
@@ -8516,7 +8515,6 @@ Q:	http://patchwork.ozlabs.org/project/netdev/list/
 S:	Supported
 F:	drivers/net/ethernet/mellanox/mlx4/
 F:	include/linux/mlx4/
-F:	include/uapi/rdma/mlx4-abi.h
 
 MELLANOX MLX4 IB driver
 M:	Yishai Hadas <yishaih@mellanox.com>
@@ -8526,6 +8524,7 @@ Q:	http://patchwork.kernel.org/project/linux-rdma/list/
 S:	Supported
 F:	drivers/infiniband/hw/mlx4/
 F:	include/linux/mlx4/
+F:	include/uapi/rdma/mlx4-abi.h
 
 MELLANOX MLX5 core VPI driver
 M:	Saeed Mahameed <saeedm@mellanox.com>
@@ -8538,7 +8537,6 @@ Q:	http://patchwork.ozlabs.org/project/netdev/list/
 S:	Supported
 F:	drivers/net/ethernet/mellanox/mlx5/core/
 F:	include/linux/mlx5/
-F:	include/uapi/rdma/mlx5-abi.h
 
 MELLANOX MLX5 IB driver
 M:	Matan Barak <matanb@mellanox.com>
@@ -8549,6 +8547,7 @@ Q:	http://patchwork.kernel.org/project/linux-rdma/list/
 S:	Supported
 F:	drivers/infiniband/hw/mlx5/
 F:	include/linux/mlx5/
+F:	include/uapi/rdma/mlx5-abi.h
 
 MELEXIS MLX90614 DRIVER
 M:	Crt Mori <cmo@melexis.com>
@@ -8588,7 +8587,7 @@ S:	Maintained
 F:	drivers/media/dvb-frontends/mn88473*
 
 MODULE SUPPORT
-M:	Jessica Yu <jeyu@redhat.com>
+M:	Jessica Yu <jeyu@kernel.org>
 M:	Rusty Russell <rusty@rustcorp.com.au>
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jeyu/linux.git modules-next
 S:	Maintained
@@ -10450,7 +10449,7 @@ S:	Orphan
 
 PXA RTC DRIVER
 M:	Robert Jarzmik <robert.jarzmik@free.fr>
-L:	rtc-linux@googlegroups.com
+L:	linux-rtc@vger.kernel.org
 S:	Maintained
 
 QAT DRIVER
@@ -10757,7 +10756,7 @@ X:	kernel/torture.c
 REAL TIME CLOCK (RTC) SUBSYSTEM
 M:	Alessandro Zummo <a.zummo@towertech.it>
 M:	Alexandre Belloni <alexandre.belloni@free-electrons.com>
-L:	rtc-linux@googlegroups.com
+L:	linux-rtc@vger.kernel.org
 Q:	http://patchwork.ozlabs.org/project/rtc-linux/list/
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/abelloni/linux.git
 S:	Maintained
@@ -11268,7 +11267,6 @@ F:	drivers/media/rc/serial_ir.c
 
 STI CEC DRIVER
 M:	Benjamin Gaignard <benjamin.gaignard@linaro.org>
-L:	kernel@stlinux.com
 S:	Maintained
 F:	drivers/staging/media/st-cec/
 F:	Documentation/devicetree/bindings/media/stih-cec.txt
@@ -11778,6 +11776,7 @@ T:	git git://git.kernel.org/pub/scm/linux/kernel/git/nsekhar/linux-davinci.git
 S:	Supported
 F:	arch/arm/mach-davinci/
 F:	drivers/i2c/busses/i2c-davinci.c
+F:	arch/arm/boot/dts/da850*
 
 TI DAVINCI SERIES MEDIA DRIVER
 M:	"Lad, Prabhakar" <prabhakar.csengg@gmail.com>
@@ -13861,7 +13860,7 @@ S:	Odd fixes
 F:	drivers/net/wireless/wl3501*
 
 WOLFSON MICROELECTRONICS DRIVERS
-L:	patches@opensource.wolfsonmicro.com
+L:	patches@opensource.cirrus.com
 T:	git https://github.com/CirrusLogic/linux-drivers.git
 W:	https://github.com/CirrusLogic/linux-drivers/wiki
 S:	Supported
 Makefile | 2
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 12
 SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc5
 NAME = Fearless Coyote
 
 # *DOCUMENTATION*
@@ -17,14 +17,12 @@
 		@ there.
 		.inst	'M' | ('Z' << 8) | (0x1310 << 16)	@ tstne r0, #0x4d000
 #else
-		mov	r0, r0
+		W(mov)	r0, r0
 #endif
 		.endm
 
 		.macro	__EFI_HEADER
 #ifdef CONFIG_EFI_STUB
-		b	__efi_start
-
 		.set	start_offset, __efi_start - start
 		.org	start + 0x3c
 		@
@@ -130,19 +130,22 @@ start:
 		.rept	7
 		__nop
 		.endr
-   ARM(		mov	r0, r0		)
-   ARM(		b	1f		)
- THUMB(		badr	r12, 1f		)
- THUMB(		bx	r12		)
+#ifndef CONFIG_THUMB2_KERNEL
+		mov	r0, r0
+#else
+ AR_CLASS(	sub	pc, pc, #3	)	@ A/R: switch to Thumb2 mode
+  M_CLASS(	nop.w			)	@ M: already in Thumb2 mode
+		.thumb
+#endif
+		W(b)	1f
 
 		.word	_magic_sig	@ Magic numbers to help the loader
 		.word	_magic_start	@ absolute load/run zImage address
 		.word	_magic_end	@ zImage end address
 		.word	0x04030201	@ endianness flag
 
- THUMB(		.thumb			)
-1:		__EFI_HEADER
+		__EFI_HEADER
+1:
 		ARM_BE8(	setend	be )	@ go BE8 if compiled for BE8
 		AR_CLASS(	mrs	r9, cpsr )
 #ifdef CONFIG_ARM_VIRT_EXT
@@ -3,6 +3,11 @@
 #include <dt-bindings/clock/bcm2835-aux.h>
 #include <dt-bindings/gpio/gpio.h>
 
+/* firmware-provided startup stubs live here, where the secondary CPUs are
+ * spinning.
+ */
+/memreserve/ 0x00000000 0x00001000;
+
 /* This include file covers the common peripherals and configuration between
  * bcm2835 and bcm2836 implementations, leaving the CPU configuration to
  * bcm2835.dtsi and bcm2836.dtsi.
@@ -120,10 +120,16 @@
 
 			ethphy0: ethernet-phy@2 {
 				reg = <2>;
+				micrel,led-mode = <1>;
+				clocks = <&clks IMX6UL_CLK_ENET_REF>;
+				clock-names = "rmii-ref";
 			};
 
 			ethphy1: ethernet-phy@1 {
 				reg = <1>;
+				micrel,led-mode = <1>;
+				clocks = <&clks IMX6UL_CLK_ENET2_REF>;
+				clock-names = "rmii-ref";
 			};
 		};
 	};
@@ -137,8 +137,8 @@ netcp: netcp@26000000 {
 	/* NetCP address range */
 	ranges = <0 0x26000000 0x1000000>;
 
-	clocks = <&clkpa>, <&clkcpgmac>, <&chipclk12>, <&clkosr>;
-	clock-names = "pa_clk", "ethss_clk", "cpts", "osr_clk";
+	clocks = <&clkpa>, <&clkcpgmac>, <&chipclk12>;
+	clock-names = "pa_clk", "ethss_clk", "cpts";
 	dma-coherent;
 
 	ti,navigator-dmas = <&dma_gbe 0>,
@@ -232,6 +232,14 @@
 		};
 	};
 
+	osr: sram@70000000 {
+		compatible = "mmio-sram";
+		reg = <0x70000000 0x10000>;
+		#address-cells = <1>;
+		#size-cells = <1>;
+		clocks = <&clkosr>;
+	};
+
 	dspgpio0: keystone_dsp_gpio@02620240 {
 		compatible = "ti,keystone-dsp-gpio";
 		gpio-controller;
@@ -1,4 +1,4 @@
-#include <versatile-ab.dts>
+#include "versatile-ab.dts"
 
 / {
 	model = "ARM Versatile PB";
@@ -235,7 +235,7 @@ int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster)
 	return ret;
 }
 
-typedef void (*phys_reset_t)(unsigned long);
+typedef typeof(cpu_reset) phys_reset_t;
 
 void mcpm_cpu_power_down(void)
 {
@@ -300,7 +300,7 @@ void mcpm_cpu_power_down(void)
 	 * on the CPU.
 	 */
 	phys_reset = (phys_reset_t)(unsigned long)__pa_symbol(cpu_reset);
-	phys_reset(__pa_symbol(mcpm_entry_point));
+	phys_reset(__pa_symbol(mcpm_entry_point), false);
 
 	/* should never get here */
 	BUG();
@@ -389,7 +389,7 @@ static int __init nocache_trampoline(unsigned long _arg)
 	__mcpm_cpu_down(cpu, cluster);
 
 	phys_reset = (phys_reset_t)(unsigned long)__pa_symbol(cpu_reset);
-	phys_reset(__pa_symbol(mcpm_entry_point));
+	phys_reset(__pa_symbol(mcpm_entry_point), false);
 	BUG();
 }
 
@@ -19,7 +19,8 @@ struct dev_archdata {
 #ifdef CONFIG_XEN
 	const struct dma_map_ops *dev_dma_ops;
 #endif
-	bool dma_coherent;
+	unsigned int dma_coherent:1;
+	unsigned int dma_ops_setup:1;
 };
 
 struct omap_device;
@@ -66,6 +66,7 @@ typedef pte_t *pte_addr_t;
 #define pgprot_noncached(prot)	(prot)
 #define pgprot_writecombine(prot) (prot)
 #define pgprot_dmacoherent(prot) (prot)
+#define pgprot_device(prot)	(prot)
 
 
 /*
@@ -104,7 +104,6 @@ __do_hyp_init:
 	@  - Write permission implies XN: disabled
 	@  - Instruction cache: enabled
 	@  - Data/Unified cache: enabled
-	@  - Memory alignment checks: enabled
 	@  - MMU: enabled (this code must be run from an identity mapping)
 	mrc	p15, 4, r0, c1, c0, 0	@ HSCR
 	ldr	r2, =HSCTLR_MASK
@@ -112,8 +111,8 @@ __do_hyp_init:
 	mrc	p15, 0, r1, c1, c0, 0	@ SCTLR
 	ldr	r2, =(HSCTLR_EE | HSCTLR_FI | HSCTLR_I | HSCTLR_C)
 	and	r1, r1, r2
- ARM(	ldr	r2, =(HSCTLR_M | HSCTLR_A)			)
- THUMB(	ldr	r2, =(HSCTLR_M | HSCTLR_A | HSCTLR_TE)		)
+ ARM(	ldr	r2, =(HSCTLR_M)					)
+ THUMB(	ldr	r2, =(HSCTLR_M | HSCTLR_TE)			)
 	orr	r1, r1, r2
 	orr	r0, r0, r1
 	mcr	p15, 4, r0, c1, c0, 0	@ HSCR
@@ -1,6 +1,7 @@
 menuconfig ARCH_AT91
 	bool "Atmel SoCs"
 	depends on ARCH_MULTI_V4T || ARCH_MULTI_V5 || ARCH_MULTI_V7
+	select ARM_CPU_SUSPEND if PM
 	select COMMON_CLK_AT91
 	select GPIOLIB
 	select PINCTRL
@@ -153,7 +153,8 @@ int __init davinci_pm_init(void)
 	davinci_sram_suspend = sram_alloc(davinci_cpu_suspend_sz, NULL);
 	if (!davinci_sram_suspend) {
 		pr_err("PM: cannot allocate SRAM memory\n");
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto no_sram_mem;
 	}
 
 	davinci_sram_push(davinci_sram_suspend, davinci_cpu_suspend,
@@ -161,6 +162,10 @@ int __init davinci_pm_init(void)
 
 	suspend_set_ops(&davinci_pm_ops);
 
+	return 0;
+
+no_sram_mem:
+	iounmap(pm_config.ddrpsc_reg_base);
 no_ddrpsc_mem:
 	iounmap(pm_config.ddrpll_reg_base);
 no_ddrpll_mem:
@@ -2311,7 +2311,14 @@ int arm_iommu_attach_device(struct device *dev,
 }
 EXPORT_SYMBOL_GPL(arm_iommu_attach_device);
 
-static void __arm_iommu_detach_device(struct device *dev)
+/**
+ * arm_iommu_detach_device
+ * @dev: valid struct device pointer
+ *
+ * Detaches the provided device from a previously attached map.
+ * This voids the dma operations (dma_map_ops pointer)
+ */
+void arm_iommu_detach_device(struct device *dev)
 {
 	struct dma_iommu_mapping *mapping;
 
@@ -2324,22 +2331,10 @@ static void __arm_iommu_detach_device(struct device *dev)
 	iommu_detach_device(mapping->domain, dev);
 	kref_put(&mapping->kref, release_iommu_mapping);
 	to_dma_iommu_mapping(dev) = NULL;
+	set_dma_ops(dev, NULL);
 
 	pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev));
 }
 
-/**
- * arm_iommu_detach_device
- * @dev: valid struct device pointer
- *
- * Detaches the provided device from a previously attached map.
- * This voids the dma operations (dma_map_ops pointer)
- */
-void arm_iommu_detach_device(struct device *dev)
-{
-	__arm_iommu_detach_device(dev);
-	set_dma_ops(dev, NULL);
-}
 EXPORT_SYMBOL_GPL(arm_iommu_detach_device);
 
 static const struct dma_map_ops *arm_get_iommu_dma_map_ops(bool coherent)
@@ -2379,7 +2374,7 @@ static void arm_teardown_iommu_dma_ops(struct device *dev)
 	if (!mapping)
 		return;
 
-	__arm_iommu_detach_device(dev);
+	arm_iommu_detach_device(dev);
 	arm_iommu_release_mapping(mapping);
 }
 
@@ -2430,9 +2425,13 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 		dev->dma_ops = xen_dma_ops;
 	}
 #endif
+	dev->archdata.dma_ops_setup = true;
 }
 
 void arch_teardown_dma_ops(struct device *dev)
 {
+	if (!dev->archdata.dma_ops_setup)
+		return;
+
 	arm_teardown_iommu_dma_ops(dev);
 }
@@ -1084,10 +1084,6 @@ config SYSVIPC_COMPAT
 	def_bool y
 	depends on COMPAT && SYSVIPC
 
-config KEYS_COMPAT
-	def_bool y
-	depends on COMPAT && KEYS
-
 endmenu
 
 menu "Power management options"
@@ -231,8 +231,7 @@
 			cpm_crypto: crypto@800000 {
 				compatible = "inside-secure,safexcel-eip197";
 				reg = <0x800000 0x200000>;
-				interrupts = <GIC_SPI 34 (IRQ_TYPE_EDGE_RISING
-				| IRQ_TYPE_LEVEL_HIGH)>,
+				interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>,
					     <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>,
					     <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>,
					     <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>,
@@ -221,8 +221,7 @@
 			cps_crypto: crypto@800000 {
 				compatible = "inside-secure,safexcel-eip197";
 				reg = <0x800000 0x200000>;
-				interrupts = <GIC_SPI 34 (IRQ_TYPE_EDGE_RISING
-				| IRQ_TYPE_LEVEL_HIGH)>,
+				interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>,
					     <GIC_SPI 278 IRQ_TYPE_LEVEL_HIGH>,
					     <GIC_SPI 279 IRQ_TYPE_LEVEL_HIGH>,
					     <GIC_SPI 280 IRQ_TYPE_LEVEL_HIGH>,
@@ -68,6 +68,7 @@ CONFIG_PCIE_QCOM=y
 CONFIG_PCIE_ARMADA_8K=y
 CONFIG_PCI_AARDVARK=y
 CONFIG_PCIE_RCAR=y
+CONFIG_PCIE_ROCKCHIP=m
 CONFIG_PCI_HOST_GENERIC=y
 CONFIG_PCI_XGENE=y
 CONFIG_ARM64_VA_BITS_48=y
@@ -208,6 +209,8 @@ CONFIG_BRCMFMAC=m
 CONFIG_WL18XX=m
 CONFIG_WLCORE_SDIO=m
 CONFIG_INPUT_EVDEV=y
+CONFIG_KEYBOARD_ADC=m
+CONFIG_KEYBOARD_CROS_EC=y
 CONFIG_KEYBOARD_GPIO=y
 CONFIG_INPUT_MISC=y
 CONFIG_INPUT_PM8941_PWRKEY=y
@@ -263,6 +266,7 @@ CONFIG_SPI_MESON_SPIFC=m
 CONFIG_SPI_ORION=y
 CONFIG_SPI_PL022=y
 CONFIG_SPI_QUP=y
+CONFIG_SPI_ROCKCHIP=y
 CONFIG_SPI_S3C64XX=y
 CONFIG_SPI_SPIDEV=m
 CONFIG_SPMI=y
@@ -292,6 +296,7 @@ CONFIG_THERMAL_GOV_POWER_ALLOCATOR=y
 CONFIG_CPU_THERMAL=y
 CONFIG_THERMAL_EMULATION=y
 CONFIG_EXYNOS_THERMAL=y
+CONFIG_ROCKCHIP_THERMAL=m
 CONFIG_WATCHDOG=y
 CONFIG_S3C2410_WATCHDOG=y
 CONFIG_MESON_GXBB_WATCHDOG=m
@@ -300,12 +305,14 @@ CONFIG_RENESAS_WDT=y
 CONFIG_BCM2835_WDT=y
 CONFIG_MFD_CROS_EC=y
 CONFIG_MFD_CROS_EC_I2C=y
+CONFIG_MFD_CROS_EC_SPI=y
 CONFIG_MFD_EXYNOS_LPASS=m
 CONFIG_MFD_HI655X_PMIC=y
 CONFIG_MFD_MAX77620=y
 CONFIG_MFD_SPMI_PMIC=y
 CONFIG_MFD_RK808=y
 CONFIG_MFD_SEC_CORE=y
+CONFIG_REGULATOR_FAN53555=y
 CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_REGULATOR_GPIO=y
 CONFIG_REGULATOR_HI655X=y
@@ -473,8 +480,10 @@ CONFIG_ARCH_TEGRA_186_SOC=y
 CONFIG_EXTCON_USB_GPIO=y
 CONFIG_IIO=y
 CONFIG_EXYNOS_ADC=y
+CONFIG_ROCKCHIP_SARADC=m
 CONFIG_PWM=y
 CONFIG_PWM_BCM2835=m
+CONFIG_PWM_CROS_EC=m
 CONFIG_PWM_MESON=m
 CONFIG_PWM_ROCKCHIP=y
 CONFIG_PWM_SAMSUNG=y
@@ -484,6 +493,7 @@ CONFIG_PHY_HI6220_USB=y
 CONFIG_PHY_SUN4I_USB=y
 CONFIG_PHY_ROCKCHIP_INNO_USB2=y
 CONFIG_PHY_ROCKCHIP_EMMC=y
+CONFIG_PHY_ROCKCHIP_PCIE=m
 CONFIG_PHY_XGENE=y
 CONFIG_PHY_TEGRA_XUSB=y
 CONFIG_ARM_SCPI_PROTOCOL=y
@@ -24,8 +24,8 @@
 	(acpi_gbl_FADT.header.revision < 6 ? 76 : 80)
 
 #define BAD_MADT_GICC_ENTRY(entry, end)					\
-	(!(entry) || (unsigned long)(entry) + sizeof(*(entry)) > (end) ||	\
-	 (entry)->header.length != ACPI_MADT_GICC_LENGTH)
+	(!(entry) || (entry)->header.length != ACPI_MADT_GICC_LENGTH ||	\
+	 (unsigned long)(entry) + ACPI_MADT_GICC_LENGTH > (end))
 
 /* Basic configuration for ACPI */
 #ifdef CONFIG_ACPI
@@ -286,6 +286,10 @@
 #define SCTLR_ELx_A	(1 << 1)
 #define SCTLR_ELx_M	1
 
+#define SCTLR_EL2_RES1	((1 << 4)  | (1 << 5)  | (1 << 11) | (1 << 16) | \
+			 (1 << 16) | (1 << 18) | (1 << 22) | (1 << 23) | \
+			 (1 << 28) | (1 << 29))
+
 #define SCTLR_ELx_FLAGS	(SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | \
			 SCTLR_ELx_SA | SCTLR_ELx_I)
 
@@ -191,8 +191,10 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
 		return NULL;
 
 	root_ops = kzalloc_node(sizeof(*root_ops), GFP_KERNEL, node);
-	if (!root_ops)
+	if (!root_ops) {
+		kfree(ri);
 		return NULL;
+	}
 
 	ri->cfg = pci_acpi_setup_ecam_mapping(root);
 	if (!ri->cfg) {
@@ -106,10 +106,13 @@ __do_hyp_init:
 	tlbi	alle2
 	dsb	sy
 
-	mrs	x4, sctlr_el2
-	and	x4, x4, #SCTLR_ELx_EE	// preserve endianness of EL2
-	ldr	x5, =SCTLR_ELx_FLAGS
-	orr	x4, x4, x5
+	/*
+	 * Preserve all the RES1 bits while setting the default flags,
+	 * as well as the EE bit on BE. Drop the A flag since the compiler
+	 * is allowed to generate unaligned accesses.
+	 */
+	ldr	x4, =(SCTLR_EL2_RES1 | (SCTLR_ELx_FLAGS & ~SCTLR_ELx_A))
+CPU_BE(	orr	x4, x4, #SCTLR_ELx_EE)
 	msr	sctlr_el2, x4
 	isb
 
@@ -65,8 +65,8 @@ static bool access_gic_ctlr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 		 * Here set VMCR.CTLR in ICC_CTLR_EL1 layout.
 		 * The vgic_set_vmcr() will convert to ICH_VMCR layout.
 		 */
-		vmcr.ctlr = val & ICC_CTLR_EL1_CBPR_MASK;
-		vmcr.ctlr |= val & ICC_CTLR_EL1_EOImode_MASK;
+		vmcr.cbpr = (val & ICC_CTLR_EL1_CBPR_MASK) >> ICC_CTLR_EL1_CBPR_SHIFT;
+		vmcr.eoim = (val & ICC_CTLR_EL1_EOImode_MASK) >> ICC_CTLR_EL1_EOImode_SHIFT;
 		vgic_set_vmcr(vcpu, &vmcr);
 	} else {
 		val = 0;
@@ -83,8 +83,8 @@ static bool access_gic_ctlr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 		 * The VMCR.CTLR value is in ICC_CTLR_EL1 layout.
 		 * Extract it directly using ICC_CTLR_EL1 reg definitions.
 		 */
-		val |= vmcr.ctlr & ICC_CTLR_EL1_CBPR_MASK;
-		val |= vmcr.ctlr & ICC_CTLR_EL1_EOImode_MASK;
+		val |= (vmcr.cbpr << ICC_CTLR_EL1_CBPR_SHIFT) & ICC_CTLR_EL1_CBPR_MASK;
+		val |= (vmcr.eoim << ICC_CTLR_EL1_EOImode_SHIFT) & ICC_CTLR_EL1_EOImode_MASK;
 
 		p->regval = val;
 	}
@@ -135,7 +135,7 @@ static bool access_gic_bpr1(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 		p->regval = 0;
 
 	vgic_get_vmcr(vcpu, &vmcr);
-	if (!((vmcr.ctlr & ICH_VMCR_CBPR_MASK) >> ICH_VMCR_CBPR_SHIFT)) {
+	if (!vmcr.cbpr) {
 		if (p->is_write) {
 			vmcr.abpr = (p->regval & ICC_BPR1_EL1_MASK) >>
 				    ICC_BPR1_EL1_SHIFT;
@@ -16,5 +16,11 @@ static inline cycles_t get_cycles(void)
 #define vxtime_lock()		do {} while (0)
 #define vxtime_unlock()		do {} while (0)
 
+/* This attribute is used in include/linux/jiffies.h alongside with
+ * __cacheline_aligned_in_smp. It is assumed that __cacheline_aligned_in_smp
+ * for frv does not contain another section specification.
+ */
+#define __jiffy_arch_data	__attribute__((__section__(".data")))
+
 #endif
 
@@ -37,15 +37,14 @@ __kernel_size_t __clear_user_hexagon(void __user *dest, unsigned long count)
 	long uncleared;
 
 	while (count > PAGE_SIZE) {
-		uncleared = __copy_to_user_hexagon(dest, &empty_zero_page,
-						PAGE_SIZE);
+		uncleared = raw_copy_to_user(dest, &empty_zero_page, PAGE_SIZE);
 		if (uncleared)
 			return count - (PAGE_SIZE - uncleared);
 		count -= PAGE_SIZE;
 		dest += PAGE_SIZE;
 	}
 	if (count)
-		count = __copy_to_user_hexagon(dest, &empty_zero_page, count);
+		count = raw_copy_to_user(dest, &empty_zero_page, count);
 
 	return count;
 }
@@ -120,7 +120,6 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long usp,
 	struct thread_info *ti = task_thread_info(p);
 	struct pt_regs *childregs, *regs = current_pt_regs();
 	unsigned long childksp;
-	p->set_child_tid = p->clear_child_tid = NULL;
 
 	childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32;
 
@@ -167,8 +167,6 @@ copy_thread(unsigned long clone_flags, unsigned long usp,
 
 	top_of_kernel_stack = sp;
 
-	p->set_child_tid = p->clear_child_tid = NULL;
-
 	/* Locate userspace context on stack... */
 	sp -= STACK_FRAME_OVERHEAD;	/* redzone */
 	sp -= sizeof(struct pt_regs);
@@ -380,22 +380,6 @@ source "arch/powerpc/platforms/Kconfig"
 
 menu "Kernel options"
 
-config PPC_DT_CPU_FTRS
-	bool "Device-tree based CPU feature discovery & setup"
-	depends on PPC_BOOK3S_64
-	default n
-	help
-	  This enables code to use a new device tree binding for describing CPU
-	  compatibility and features. Saying Y here will attempt to use the new
-	  binding if the firmware provides it. Currently only the skiboot
-	  firmware provides this binding.
-	  If you're not sure say Y.
-
-config PPC_CPUFEATURES_ENABLE_UNKNOWN
-	bool "cpufeatures pass through unknown features to guest/userspace"
-	depends on PPC_DT_CPU_FTRS
-	default y
-
 config HIGHMEM
 	bool "High memory support"
 	depends on PPC32
@@ -1215,11 +1199,6 @@ source "arch/powerpc/Kconfig.debug"
 
 source "security/Kconfig"
 
-config KEYS_COMPAT
-	bool
-	depends on COMPAT && KEYS
-	default y
-
 source "crypto/Kconfig"
 
 config PPC_LIB_RHEAP
@@ -8,7 +8,7 @@
 #define H_PTE_INDEX_SIZE  9
 #define H_PMD_INDEX_SIZE  7
 #define H_PUD_INDEX_SIZE  9
-#define H_PGD_INDEX_SIZE  12
+#define H_PGD_INDEX_SIZE  9
 
 #ifndef __ASSEMBLY__
 #define H_PTE_TABLE_SIZE	(sizeof(pte_t) << H_PTE_INDEX_SIZE)
@@ -214,7 +214,6 @@ enum {
 #define CPU_FTR_DAWR			LONG_ASM_CONST(0x0400000000000000)
 #define CPU_FTR_DABRX			LONG_ASM_CONST(0x0800000000000000)
 #define CPU_FTR_PMAO_BUG		LONG_ASM_CONST(0x1000000000000000)
-#define CPU_FTR_SUBCORE			LONG_ASM_CONST(0x2000000000000000)
 #define CPU_FTR_POWER9_DD1		LONG_ASM_CONST(0x4000000000000000)
 
 #ifndef __ASSEMBLY__
@@ -463,7 +462,7 @@ enum {
 	    CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
 	    CPU_FTR_ICSWX | CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \
 	    CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_DAWR | \
-	    CPU_FTR_ARCH_207S | CPU_FTR_TM_COMP | CPU_FTR_SUBCORE)
+	    CPU_FTR_ARCH_207S | CPU_FTR_TM_COMP)
 #define CPU_FTRS_POWER8E (CPU_FTRS_POWER8 | CPU_FTR_PMAO_BUG)
 #define CPU_FTRS_POWER8_DD1 (CPU_FTRS_POWER8 & ~CPU_FTR_DBELL)
 #define CPU_FTRS_POWER9 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
@@ -110,13 +110,18 @@ void release_thread(struct task_struct *);
 #define TASK_SIZE_128TB (0x0000800000000000UL)
 #define TASK_SIZE_512TB (0x0002000000000000UL)
 
-#ifdef CONFIG_PPC_BOOK3S_64
+/*
+ * For now 512TB is only supported with book3s and 64K linux page size.
+ */
+#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_PPC_64K_PAGES)
 /*
  * Max value currently used:
  */
 #define TASK_SIZE_USER64		TASK_SIZE_512TB
+#define DEFAULT_MAP_WINDOW_USER64	TASK_SIZE_128TB
 #else
 #define TASK_SIZE_USER64		TASK_SIZE_64TB
|
||||||
|
#define DEFAULT_MAP_WINDOW_USER64 TASK_SIZE_64TB
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@ -132,7 +137,7 @@ void release_thread(struct task_struct *);
|
|||||||
* space during mmap's.
|
* space during mmap's.
|
||||||
*/
|
*/
|
||||||
#define TASK_UNMAPPED_BASE_USER32 (PAGE_ALIGN(TASK_SIZE_USER32 / 4))
|
#define TASK_UNMAPPED_BASE_USER32 (PAGE_ALIGN(TASK_SIZE_USER32 / 4))
|
||||||
#define TASK_UNMAPPED_BASE_USER64 (PAGE_ALIGN(TASK_SIZE_128TB / 4))
|
#define TASK_UNMAPPED_BASE_USER64 (PAGE_ALIGN(DEFAULT_MAP_WINDOW_USER64 / 4))
|
||||||
|
|
||||||
#define TASK_UNMAPPED_BASE ((is_32bit_task()) ? \
|
#define TASK_UNMAPPED_BASE ((is_32bit_task()) ? \
|
||||||
TASK_UNMAPPED_BASE_USER32 : TASK_UNMAPPED_BASE_USER64 )
|
TASK_UNMAPPED_BASE_USER32 : TASK_UNMAPPED_BASE_USER64 )
|
||||||
@ -144,20 +149,14 @@ void release_thread(struct task_struct *);
|
|||||||
*/
|
*/
|
||||||
#ifdef CONFIG_PPC_BOOK3S_64
|
#ifdef CONFIG_PPC_BOOK3S_64
|
||||||
#define DEFAULT_MAP_WINDOW ((is_32bit_task()) ? \
|
#define DEFAULT_MAP_WINDOW ((is_32bit_task()) ? \
|
||||||
TASK_SIZE_USER32 : TASK_SIZE_128TB)
|
TASK_SIZE_USER32 : DEFAULT_MAP_WINDOW_USER64)
|
||||||
#else
|
#else
|
||||||
#define DEFAULT_MAP_WINDOW TASK_SIZE
|
#define DEFAULT_MAP_WINDOW TASK_SIZE
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
#ifdef __powerpc64__
|
#ifdef __powerpc64__
|
||||||
|
|
||||||
#ifdef CONFIG_PPC_BOOK3S_64
|
#define STACK_TOP_USER64 DEFAULT_MAP_WINDOW_USER64
|
||||||
/* Limit stack to 128TB */
|
|
||||||
#define STACK_TOP_USER64 TASK_SIZE_128TB
|
|
||||||
#else
|
|
||||||
#define STACK_TOP_USER64 TASK_SIZE_USER64
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#define STACK_TOP_USER32 TASK_SIZE_USER32
|
#define STACK_TOP_USER32 TASK_SIZE_USER32
|
||||||
|
|
||||||
#define STACK_TOP (is_32bit_task() ? \
|
#define STACK_TOP (is_32bit_task() ? \
|
||||||
|
@ -44,8 +44,22 @@ extern void __init dump_numa_cpu_topology(void);
|
|||||||
extern int sysfs_add_device_to_node(struct device *dev, int nid);
|
extern int sysfs_add_device_to_node(struct device *dev, int nid);
|
||||||
extern void sysfs_remove_device_from_node(struct device *dev, int nid);
|
extern void sysfs_remove_device_from_node(struct device *dev, int nid);
|
||||||
|
|
||||||
|
static inline int early_cpu_to_node(int cpu)
|
||||||
|
{
|
||||||
|
int nid;
|
||||||
|
|
||||||
|
nid = numa_cpu_lookup_table[cpu];
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Fall back to node 0 if nid is unset (it should be, except bugs).
|
||||||
|
* This allows callers to safely do NODE_DATA(early_cpu_to_node(cpu)).
|
||||||
|
*/
|
||||||
|
return (nid < 0) ? 0 : nid;
|
||||||
|
}
|
||||||
#else
|
#else
|
||||||
|
|
||||||
|
static inline int early_cpu_to_node(int cpu) { return 0; }
|
||||||
|
|
||||||
static inline void dump_numa_cpu_topology(void) {}
|
static inline void dump_numa_cpu_topology(void) {}
|
||||||
|
|
||||||
static inline int sysfs_add_device_to_node(struct device *dev, int nid)
|
static inline int sysfs_add_device_to_node(struct device *dev, int nid)
|
||||||
|
@ -46,6 +46,8 @@
|
|||||||
#define PPC_FEATURE2_HTM_NOSC 0x01000000
|
#define PPC_FEATURE2_HTM_NOSC 0x01000000
|
||||||
#define PPC_FEATURE2_ARCH_3_00 0x00800000 /* ISA 3.00 */
|
#define PPC_FEATURE2_ARCH_3_00 0x00800000 /* ISA 3.00 */
|
||||||
#define PPC_FEATURE2_HAS_IEEE128 0x00400000 /* VSX IEEE Binary Float 128-bit */
|
#define PPC_FEATURE2_HAS_IEEE128 0x00400000 /* VSX IEEE Binary Float 128-bit */
|
||||||
|
#define PPC_FEATURE2_DARN 0x00200000 /* darn random number insn */
|
||||||
|
#define PPC_FEATURE2_SCV 0x00100000 /* scv syscall */
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* IMPORTANT!
|
* IMPORTANT!
|
||||||
|
@ -124,7 +124,8 @@ extern void __restore_cpu_e6500(void);
|
|||||||
#define COMMON_USER_POWER9 COMMON_USER_POWER8
|
#define COMMON_USER_POWER9 COMMON_USER_POWER8
|
||||||
#define COMMON_USER2_POWER9 (COMMON_USER2_POWER8 | \
|
#define COMMON_USER2_POWER9 (COMMON_USER2_POWER8 | \
|
||||||
PPC_FEATURE2_ARCH_3_00 | \
|
PPC_FEATURE2_ARCH_3_00 | \
|
||||||
PPC_FEATURE2_HAS_IEEE128)
|
PPC_FEATURE2_HAS_IEEE128 | \
|
||||||
|
PPC_FEATURE2_DARN )
|
||||||
|
|
||||||
#ifdef CONFIG_PPC_BOOK3E_64
|
#ifdef CONFIG_PPC_BOOK3E_64
|
||||||
#define COMMON_USER_BOOKE (COMMON_USER_PPC64 | PPC_FEATURE_BOOKE)
|
#define COMMON_USER_BOOKE (COMMON_USER_PPC64 | PPC_FEATURE_BOOKE)
|
||||||
|
@ -8,6 +8,7 @@
|
|||||||
#include <linux/export.h>
|
#include <linux/export.h>
|
||||||
#include <linux/init.h>
|
#include <linux/init.h>
|
||||||
#include <linux/jump_label.h>
|
#include <linux/jump_label.h>
|
||||||
|
#include <linux/libfdt.h>
|
||||||
#include <linux/memblock.h>
|
#include <linux/memblock.h>
|
||||||
#include <linux/printk.h>
|
#include <linux/printk.h>
|
||||||
#include <linux/sched.h>
|
#include <linux/sched.h>
|
||||||
@ -642,7 +643,6 @@ static struct dt_cpu_feature_match __initdata
|
|||||||
{"processor-control-facility", feat_enable_dbell, CPU_FTR_DBELL},
|
{"processor-control-facility", feat_enable_dbell, CPU_FTR_DBELL},
|
||||||
{"processor-control-facility-v3", feat_enable_dbell, CPU_FTR_DBELL},
|
{"processor-control-facility-v3", feat_enable_dbell, CPU_FTR_DBELL},
|
||||||
{"processor-utilization-of-resources-register", feat_enable_purr, 0},
|
{"processor-utilization-of-resources-register", feat_enable_purr, 0},
|
||||||
{"subcore", feat_enable, CPU_FTR_SUBCORE},
|
|
||||||
{"no-execute", feat_enable, 0},
|
{"no-execute", feat_enable, 0},
|
||||||
{"strong-access-ordering", feat_enable, CPU_FTR_SAO},
|
{"strong-access-ordering", feat_enable, CPU_FTR_SAO},
|
||||||
{"cache-inhibited-large-page", feat_enable_large_ci, 0},
|
{"cache-inhibited-large-page", feat_enable_large_ci, 0},
|
||||||
@ -671,12 +671,24 @@ static struct dt_cpu_feature_match __initdata
|
|||||||
{"wait-v3", feat_enable, 0},
|
{"wait-v3", feat_enable, 0},
|
||||||
};
|
};
|
||||||
|
|
||||||
/* XXX: how to configure this? Default + boot time? */
|
static bool __initdata using_dt_cpu_ftrs;
|
||||||
#ifdef CONFIG_PPC_CPUFEATURES_ENABLE_UNKNOWN
|
static bool __initdata enable_unknown = true;
|
||||||
#define CPU_FEATURE_ENABLE_UNKNOWN 1
|
|
||||||
#else
|
static int __init dt_cpu_ftrs_parse(char *str)
|
||||||
#define CPU_FEATURE_ENABLE_UNKNOWN 0
|
{
|
||||||
#endif
|
if (!str)
|
||||||
|
return 0;
|
||||||
|
|
||||||
|
if (!strcmp(str, "off"))
|
||||||
|
using_dt_cpu_ftrs = false;
|
||||||
|
else if (!strcmp(str, "known"))
|
||||||
|
enable_unknown = false;
|
||||||
|
else
|
||||||
|
return 1;
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
early_param("dt_cpu_ftrs", dt_cpu_ftrs_parse);
|
||||||
|
|
||||||
static void __init cpufeatures_setup_start(u32 isa)
|
static void __init cpufeatures_setup_start(u32 isa)
|
||||||
{
|
{
|
||||||
@ -707,7 +719,7 @@ static bool __init cpufeatures_process_feature(struct dt_cpu_feature *f)
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!known && CPU_FEATURE_ENABLE_UNKNOWN) {
|
if (!known && enable_unknown) {
|
||||||
if (!feat_try_enable_unknown(f)) {
|
if (!feat_try_enable_unknown(f)) {
|
||||||
pr_info("not enabling: %s (unknown and unsupported by kernel)\n",
|
pr_info("not enabling: %s (unknown and unsupported by kernel)\n",
|
||||||
f->name);
|
f->name);
|
||||||
@ -756,6 +768,26 @@ static void __init cpufeatures_setup_finished(void)
|
|||||||
cur_cpu_spec->cpu_features, cur_cpu_spec->mmu_features);
|
cur_cpu_spec->cpu_features, cur_cpu_spec->mmu_features);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static int __init disabled_on_cmdline(void)
|
||||||
|
{
|
||||||
|
unsigned long root, chosen;
|
||||||
|
const char *p;
|
||||||
|
|
||||||
|
root = of_get_flat_dt_root();
|
||||||
|
chosen = of_get_flat_dt_subnode_by_name(root, "chosen");
|
||||||
|
if (chosen == -FDT_ERR_NOTFOUND)
|
||||||
|
return false;
|
||||||
|
|
||||||
|
p = of_get_flat_dt_prop(chosen, "bootargs", NULL);
|
||||||
|
if (!p)
|
||||||
|
return false;
|
||||||
|
|
||||||
|
if (strstr(p, "dt_cpu_ftrs=off"))
|
||||||
|
return true;
|
||||||
|
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
static int __init fdt_find_cpu_features(unsigned long node, const char *uname,
|
static int __init fdt_find_cpu_features(unsigned long node, const char *uname,
|
||||||
int depth, void *data)
|
int depth, void *data)
|
||||||
{
|
{
|
||||||
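Aside (not part of the patch): the disabled_on_cmdline() hunk above scans the flattened device tree's /chosen/bootargs string by hand because, at the point dt_cpu_ftrs_init() runs, the early_param() handler added earlier in this file does not appear to have been processed yet. A minimal, hypothetical userspace-style sketch of that string check, for illustration only (the real code reads the property out of the FDT rather than a plain C string):

#include <stdbool.h>
#include <string.h>

/* Hypothetical stand-in: "bootargs" is an ordinary NUL-terminated string. */
static bool dt_cpu_ftrs_disabled(const char *bootargs)
{
	return bootargs && strstr(bootargs, "dt_cpu_ftrs=off") != NULL;
}

/* e.g. dt_cpu_ftrs_disabled("root=/dev/sda1 dt_cpu_ftrs=off") returns true */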
@@ -766,8 +798,6 @@ static int __init fdt_find_cpu_features(unsigned long node, const char *uname,
 	return 0;
 }
 
-static bool __initdata using_dt_cpu_ftrs = false;
-
 bool __init dt_cpu_ftrs_in_use(void)
 {
 	return using_dt_cpu_ftrs;
@@ -775,6 +805,8 @@ bool __init dt_cpu_ftrs_in_use(void)
 
 bool __init dt_cpu_ftrs_init(void *fdt)
 {
+	using_dt_cpu_ftrs = false;
+
 	/* Setup and verify the FDT, if it fails we just bail */
 	if (!early_init_dt_verify(fdt))
 		return false;
@@ -782,6 +814,9 @@ bool __init dt_cpu_ftrs_init(void *fdt)
 	if (!of_scan_flat_dt(fdt_find_cpu_features, NULL))
 		return false;
 
+	if (disabled_on_cmdline())
+		return false;
+
 	cpufeatures_setup_cpu();
 
 	using_dt_cpu_ftrs = true;
@@ -1027,5 +1062,8 @@ static int __init dt_cpu_ftrs_scan_callback(unsigned long node, const char
 
 void __init dt_cpu_ftrs_scan(void)
 {
+	if (!using_dt_cpu_ftrs)
+		return;
+
 	of_scan_flat_dt(dt_cpu_ftrs_scan_callback, NULL);
 }
@@ -1666,6 +1666,7 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
 #ifdef CONFIG_VSX
 	current->thread.used_vsr = 0;
 #endif
+	current->thread.load_fp = 0;
 	memset(&current->thread.fp_state, 0, sizeof(current->thread.fp_state));
 	current->thread.fp_save_area = NULL;
 #ifdef CONFIG_ALTIVEC
@@ -1674,6 +1675,7 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
 	current->thread.vr_save_area = NULL;
 	current->thread.vrsave = 0;
 	current->thread.used_vr = 0;
+	current->thread.load_vec = 0;
 #endif /* CONFIG_ALTIVEC */
 #ifdef CONFIG_SPE
 	memset(current->thread.evr, 0, sizeof(current->thread.evr));
@@ -1685,6 +1687,7 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
 	current->thread.tm_tfhar = 0;
 	current->thread.tm_texasr = 0;
 	current->thread.tm_tfiar = 0;
+	current->thread.load_tm = 0;
 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
 }
 EXPORT_SYMBOL(start_thread);
@@ -161,7 +161,9 @@ static struct ibm_pa_feature {
 	{ .pabyte = 0,  .pabit = 3, .cpu_features  = CPU_FTR_CTRL },
 	{ .pabyte = 0,  .pabit = 6, .cpu_features  = CPU_FTR_NOEXECUTE },
 	{ .pabyte = 1,  .pabit = 2, .mmu_features  = MMU_FTR_CI_LARGE_PAGE },
+#ifdef CONFIG_PPC_RADIX_MMU
 	{ .pabyte = 40, .pabit = 0, .mmu_features  = MMU_FTR_TYPE_RADIX },
+#endif
 	{ .pabyte = 1,  .pabit = 1, .invert = 1, .cpu_features = CPU_FTR_NODSISRALIGN },
 	{ .pabyte = 5,  .pabit = 0, .cpu_features  = CPU_FTR_REAL_LE,
 	  .cpu_user_ftrs = PPC_FEATURE_TRUE_LE },
@@ -928,7 +928,7 @@ void __init setup_arch(char **cmdline_p)
 
 #ifdef CONFIG_PPC_MM_SLICES
 #ifdef CONFIG_PPC64
-	init_mm.context.addr_limit = TASK_SIZE_128TB;
+	init_mm.context.addr_limit = DEFAULT_MAP_WINDOW_USER64;
 #else
 #error	"context.addr_limit not initialized."
 #endif
@@ -661,7 +661,7 @@ void __init emergency_stack_init(void)
 
 static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
 {
-	return __alloc_bootmem_node(NODE_DATA(cpu_to_node(cpu)), size, align,
+	return __alloc_bootmem_node(NODE_DATA(early_cpu_to_node(cpu)), size, align,
				    __pa(MAX_DMA_ADDRESS));
 }
 
@@ -672,7 +672,7 @@ static void __init pcpu_fc_free(void *ptr, size_t size)
 
 static int pcpu_cpu_distance(unsigned int from, unsigned int to)
 {
-	if (cpu_to_node(from) == cpu_to_node(to))
+	if (early_cpu_to_node(from) == early_cpu_to_node(to))
 		return LOCAL_DISTANCE;
 	else
 		return REMOTE_DISTANCE;
@@ -99,7 +99,7 @@ static int hash__init_new_context(struct mm_struct *mm)
 	 * mm->context.addr_limit. Default to max task size so that we copy the
 	 * default values to paca which will help us to handle slb miss early.
 	 */
-	mm->context.addr_limit = TASK_SIZE_128TB;
+	mm->context.addr_limit = DEFAULT_MAP_WINDOW_USER64;
 
 	/*
 	 * The old code would re-promote on fork, we don't do that when using
@@ -402,7 +402,7 @@ static struct power_pmu power9_isa207_pmu = {
 	.name			= "POWER9",
 	.n_counter		= MAX_PMU_COUNTERS,
 	.add_fields		= ISA207_ADD_FIELDS,
-	.test_adder		= ISA207_TEST_ADDER,
+	.test_adder		= P9_DD1_TEST_ADDER,
 	.compute_mmcr		= isa207_compute_mmcr,
 	.config_bhrb		= power9_config_bhrb,
 	.bhrb_filter_map	= power9_bhrb_filter_map,
@@ -421,7 +421,7 @@ static struct power_pmu power9_pmu = {
 	.name			= "POWER9",
 	.n_counter		= MAX_PMU_COUNTERS,
 	.add_fields		= ISA207_ADD_FIELDS,
-	.test_adder		= P9_DD1_TEST_ADDER,
+	.test_adder		= ISA207_TEST_ADDER,
 	.compute_mmcr		= isa207_compute_mmcr,
 	.config_bhrb		= power9_config_bhrb,
 	.bhrb_filter_map	= power9_bhrb_filter_map,
@@ -59,6 +59,17 @@ config PPC_OF_BOOT_TRAMPOLINE
 
 	  In case of doubt, say Y
 
+config PPC_DT_CPU_FTRS
+	bool "Device-tree based CPU feature discovery & setup"
+	depends on PPC_BOOK3S_64
+	default y
+	help
+	  This enables code to use a new device tree binding for describing CPU
+	  compatibility and features. Saying Y here will attempt to use the new
+	  binding if the firmware provides it. Currently only the skiboot
+	  firmware provides this binding.
+	  If you're not sure say Y.
+
 config UDBG_RTAS_CONSOLE
 	bool "RTAS based debug console"
 	depends on PPC_RTAS
@@ -197,7 +197,9 @@ static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
 	    (REGION_ID(ea) != USER_REGION_ID)) {
 
 		spin_unlock(&spu->register_lock);
-		ret = hash_page(ea, _PAGE_PRESENT | _PAGE_READ, 0x300, dsisr);
+		ret = hash_page(ea,
+				_PAGE_PRESENT | _PAGE_READ | _PAGE_PRIVILEGED,
+				0x300, dsisr);
 		spin_lock(&spu->register_lock);
 
 		if (!ret) {
@@ -175,6 +175,8 @@ static int spufs_arch_write_note(struct spu_context *ctx, int i,
 	skip = roundup(cprm->pos - total + sz, 4) - cprm->pos;
 	if (!dump_skip(cprm, skip))
 		goto Eio;
+
+	rc = 0;
 out:
 	free_page((unsigned long)buf);
 	return rc;
@@ -714,7 +714,7 @@ static void pnv_npu2_release_context(struct kref *kref)
 void pnv_npu2_destroy_context(struct npu_context *npu_context,
 			struct pci_dev *gpdev)
 {
-	struct pnv_phb *nphb, *phb;
+	struct pnv_phb *nphb;
 	struct npu *npu;
 	struct pci_dev *npdev = pnv_pci_get_npu_dev(gpdev, 0);
 	struct device_node *nvlink_dn;
@@ -728,13 +728,12 @@ void pnv_npu2_destroy_context(struct npu_context *npu_context,
 
 	nphb = pci_bus_to_host(npdev->bus)->private_data;
 	npu = &nphb->npu;
-	phb = pci_bus_to_host(gpdev->bus)->private_data;
 	nvlink_dn = of_parse_phandle(npdev->dev.of_node, "ibm,nvlink", 0);
 	if (WARN_ON(of_property_read_u32(nvlink_dn, "ibm,npu-link-index",
							&nvlink_index)))
 		return;
 	npu_context->npdev[npu->index][nvlink_index] = NULL;
-	opal_npu_destroy_context(phb->opal_id, npu_context->mm->context.id,
+	opal_npu_destroy_context(nphb->opal_id, npu_context->mm->context.id,
				PCI_DEVID(gpdev->bus->number, gpdev->devfn));
 	kref_put(&npu_context->kref, pnv_npu2_release_context);
 }
@@ -407,7 +407,13 @@ static DEVICE_ATTR(subcores_per_core, 0644,
 
 static int subcore_init(void)
 {
-	if (!cpu_has_feature(CPU_FTR_SUBCORE))
+	unsigned pvr_ver;
+
+	pvr_ver = PVR_VER(mfspr(SPRN_PVR));
+
+	if (pvr_ver != PVR_POWER8 &&
+	    pvr_ver != PVR_POWER8E &&
+	    pvr_ver != PVR_POWER8NVL)
 		return 0;
 
 	/*
@@ -124,6 +124,7 @@ static struct property *dlpar_clone_drconf_property(struct device_node *dn)
 	for (i = 0; i < num_lmbs; i++) {
 		lmbs[i].base_addr = be64_to_cpu(lmbs[i].base_addr);
 		lmbs[i].drc_index = be32_to_cpu(lmbs[i].drc_index);
+		lmbs[i].aa_index = be32_to_cpu(lmbs[i].aa_index);
 		lmbs[i].flags = be32_to_cpu(lmbs[i].flags);
 	}
 
@@ -147,6 +148,7 @@ static void dlpar_update_drconf_property(struct device_node *dn,
 	for (i = 0; i < num_lmbs; i++) {
 		lmbs[i].base_addr = cpu_to_be64(lmbs[i].base_addr);
 		lmbs[i].drc_index = cpu_to_be32(lmbs[i].drc_index);
+		lmbs[i].aa_index = cpu_to_be32(lmbs[i].aa_index);
 		lmbs[i].flags = cpu_to_be32(lmbs[i].flags);
 	}
 
@@ -75,7 +75,8 @@ static int u8_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
 
 static void u8_gpio_save_regs(struct of_mm_gpio_chip *mm_gc)
 {
-	struct u8_gpio_chip *u8_gc = gpiochip_get_data(&mm_gc->gc);
+	struct u8_gpio_chip *u8_gc =
+		container_of(mm_gc, struct u8_gpio_chip, mm_gc);
 
 	u8_gc->data = in_8(mm_gc->regs);
 }
@@ -363,9 +363,6 @@ config COMPAT
 config SYSVIPC_COMPAT
 	def_bool y if COMPAT && SYSVIPC
 
-config KEYS_COMPAT
-	def_bool y if COMPAT && KEYS
-
 config SMP
 	def_bool y
 	prompt "Symmetric multi-processing support"
@@ -541,7 +541,6 @@ struct kvm_s390_float_interrupt {
 	struct mutex ais_lock;
 	u8 simm;
 	u8 nimm;
-	int ais_enabled;
 };
 
 struct kvm_hw_wp_info_arch {
@@ -2160,7 +2160,7 @@ static int modify_ais_mode(struct kvm *kvm, struct kvm_device_attr *attr)
 	struct kvm_s390_ais_req req;
 	int ret = 0;
 
-	if (!fi->ais_enabled)
+	if (!test_kvm_facility(kvm, 72))
 		return -ENOTSUPP;
 
 	if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req)))
@@ -2204,7 +2204,7 @@ static int kvm_s390_inject_airq(struct kvm *kvm,
 	};
 	int ret = 0;
 
-	if (!fi->ais_enabled || !adapter->suppressible)
+	if (!test_kvm_facility(kvm, 72) || !adapter->suppressible)
 		return kvm_s390_inject_vm(kvm, &s390int);
 
 	mutex_lock(&fi->ais_lock);
@@ -558,7 +558,6 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
 	} else {
 		set_kvm_facility(kvm->arch.model.fac_mask, 72);
 		set_kvm_facility(kvm->arch.model.fac_list, 72);
-		kvm->arch.float_int.ais_enabled = 1;
 		r = 0;
 	}
 	mutex_unlock(&kvm->lock);
@@ -1533,7 +1532,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 	mutex_init(&kvm->arch.float_int.ais_lock);
 	kvm->arch.float_int.simm = 0;
 	kvm->arch.float_int.nimm = 0;
-	kvm->arch.float_int.ais_enabled = 0;
 	spin_lock_init(&kvm->arch.float_int.lock);
 	for (i = 0; i < FIRQ_LIST_COUNT; i++)
 		INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
@@ -192,9 +192,9 @@ config NR_CPUS
 	int "Maximum number of CPUs"
 	depends on SMP
 	range 2 32 if SPARC32
-	range 2 1024 if SPARC64
+	range 2 4096 if SPARC64
 	default 32 if SPARC32
-	default 64 if SPARC64
+	default 4096 if SPARC64
 
 source kernel/Kconfig.hz
 
@@ -295,9 +295,13 @@ config NUMA
 	depends on SPARC64 && SMP
 
 config NODES_SHIFT
-	int
-	default "4"
+	int "Maximum NUMA Nodes (as a power of 2)"
+	range 4 5 if SPARC64
+	default "5"
 	depends on NEED_MULTIPLE_NODES
+	help
+	  Specify the maximum number of NUMA Nodes available on the target
+	  system.  Increases memory reserved to accommodate various tables.
 
 # Some NUMA nodes have memory ranges that span
 # other nodes.  Even though a pfn is valid and
@@ -573,9 +577,6 @@ config SYSVIPC_COMPAT
 	depends on COMPAT && SYSVIPC
 	default y
 
-config KEYS_COMPAT
-	def_bool y if COMPAT && KEYS
-
 endmenu
 
 source "net/Kconfig"
@@ -52,7 +52,7 @@
 #define CTX_NR_MASK		TAG_CONTEXT_BITS
 #define CTX_HW_MASK		(CTX_NR_MASK | CTX_PGSZ_MASK)
 
-#define CTX_FIRST_VERSION	((_AC(1,UL) << CTX_VERSION_SHIFT) + _AC(1,UL))
+#define CTX_FIRST_VERSION	BIT(CTX_VERSION_SHIFT)
 #define CTX_VALID(__ctx)	\
	 (!(((__ctx.sparc64_ctx_val) ^ tlb_context_cache) & CTX_VERSION_MASK))
 #define CTX_HWBITS(__ctx)	((__ctx.sparc64_ctx_val) & CTX_HW_MASK)
@@ -19,13 +19,8 @@ extern spinlock_t ctx_alloc_lock;
 extern unsigned long tlb_context_cache;
 extern unsigned long mmu_context_bmap[];
 
+DECLARE_PER_CPU(struct mm_struct *, per_cpu_secondary_mm);
 void get_new_mmu_context(struct mm_struct *mm);
-#ifdef CONFIG_SMP
-void smp_new_mmu_context_version(void);
-#else
-#define smp_new_mmu_context_version() do { } while (0)
-#endif
-
 int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
 void destroy_context(struct mm_struct *mm);
 
@@ -76,8 +71,9 @@ void __flush_tlb_mm(unsigned long, unsigned long);
 static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk)
 {
 	unsigned long ctx_valid, flags;
-	int cpu;
+	int cpu = smp_processor_id();
 
+	per_cpu(per_cpu_secondary_mm, cpu) = mm;
 	if (unlikely(mm == &init_mm))
 		return;
 
@@ -123,7 +119,6 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
 	 * for the first time, we must flush that context out of the
 	 * local TLB.
 	 */
-	cpu = smp_processor_id();
 	if (!ctx_valid || !cpumask_test_cpu(cpu, mm_cpumask(mm))) {
 		cpumask_set_cpu(cpu, mm_cpumask(mm));
 		__flush_tlb_mm(CTX_HWBITS(mm->context),
@@ -133,26 +128,7 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
 }
 
 #define deactivate_mm(tsk,mm)	do { } while (0)
-
-/* Activate a new MM instance for the current task. */
-static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm)
-{
-	unsigned long flags;
-	int cpu;
-
-	spin_lock_irqsave(&mm->context.lock, flags);
-	if (!CTX_VALID(mm->context))
-		get_new_mmu_context(mm);
-	cpu = smp_processor_id();
-	if (!cpumask_test_cpu(cpu, mm_cpumask(mm)))
-		cpumask_set_cpu(cpu, mm_cpumask(mm));
-
-	load_secondary_context(mm);
-	__flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT);
-	tsb_context_switch(mm);
-	spin_unlock_irqrestore(&mm->context.lock, flags);
-}
-
+#define activate_mm(active_mm, mm) switch_mm(active_mm, mm, NULL)
 #endif /* !(__ASSEMBLY__) */
 
 #endif /* !(__SPARC64_MMU_CONTEXT_H) */
@@ -20,7 +20,6 @@
 #define PIL_SMP_CALL_FUNC	1
 #define PIL_SMP_RECEIVE_SIGNAL	2
 #define PIL_SMP_CAPTURE		3
-#define PIL_SMP_CTX_NEW_VERSION	4
 #define PIL_DEVICE_IRQ		5
 #define PIL_SMP_CALL_FUNC_SNGL	6
 #define PIL_DEFERRED_PCR_WORK	7
@@ -327,6 +327,7 @@ struct vio_dev {
 	int			compat_len;
 
 	u64			dev_no;
+	u64			id;
 
 	unsigned long		channel_id;
 
@@ -909,7 +909,7 @@ static int register_services(struct ds_info *dp)
 		pbuf.req.handle = cp->handle;
 		pbuf.req.major = 1;
 		pbuf.req.minor = 0;
-		strcpy(pbuf.req.svc_id, cp->service_id);
+		strcpy(pbuf.id_buf, cp->service_id);
 
 		err = __ds_send(lp, &pbuf, msg_len);
 		if (err > 0)
@@ -1034,17 +1034,26 @@ static void __init init_cpu_send_mondo_info(struct trap_per_cpu *tb)
 {
 #ifdef CONFIG_SMP
 	unsigned long page;
+	void *mondo, *p;
 
-	BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64));
+	BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > PAGE_SIZE);
+
+	/* Make sure mondo block is 64byte aligned */
+	p = kzalloc(127, GFP_KERNEL);
+	if (!p) {
+		prom_printf("SUN4V: Error, cannot allocate mondo block.\n");
+		prom_halt();
+	}
+	mondo = (void *)(((unsigned long)p + 63) & ~0x3f);
+	tb->cpu_mondo_block_pa = __pa(mondo);
 
 	page = get_zeroed_page(GFP_KERNEL);
 	if (!page) {
-		prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n");
+		prom_printf("SUN4V: Error, cannot allocate cpu list page.\n");
		prom_halt();
 	}
 
-	tb->cpu_mondo_block_pa = __pa(page);
-	tb->cpu_list_pa = __pa(page + 64);
+	tb->cpu_list_pa = __pa(page);
 #endif
 }
 
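Aside (illustration only): the init_cpu_send_mondo_info() hunk above gets a 64-byte-aligned mondo block by over-allocating with kzalloc(127) and rounding the pointer up. The same pointer-rounding idiom in a self-contained form; the helper name is hypothetical and the original, unaligned pointer is what would have to be freed:

#include <stdint.h>

/* Round p up to the next 64-byte boundary. With an over-allocation of
 * (size + 63) bytes the rounded pointer is guaranteed to leave at least
 * "size" usable bytes inside the buffer. */
static inline void *align_up_64(void *p)
{
	return (void *)(((uintptr_t)p + 63) & ~(uintptr_t)63);
}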
@@ -37,7 +37,6 @@ void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr
 /* smp_64.c */
 void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs);
 void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs);
-void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs);
 void __irq_entry smp_penguin_jailcell(int irq, struct pt_regs *regs);
 void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs);
 
@@ -964,37 +964,6 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
 	preempt_enable();
 }
 
-void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
-{
-	struct mm_struct *mm;
-	unsigned long flags;
-
-	clear_softint(1 << irq);
-
-	/* See if we need to allocate a new TLB context because
-	 * the version of the one we are using is now out of date.
-	 */
-	mm = current->active_mm;
-	if (unlikely(!mm || (mm == &init_mm)))
-		return;
-
-	spin_lock_irqsave(&mm->context.lock, flags);
-
-	if (unlikely(!CTX_VALID(mm->context)))
-		get_new_mmu_context(mm);
-
-	spin_unlock_irqrestore(&mm->context.lock, flags);
-
-	load_secondary_context(mm);
-	__flush_tlb_mm(CTX_HWBITS(mm->context),
-		       SECONDARY_CONTEXT);
-}
-
-void smp_new_mmu_context_version(void)
-{
-	smp_cross_call(&xcall_new_mmu_context_version, 0, 0, 0);
-}
-
 #ifdef CONFIG_KGDB
 void kgdb_roundup_cpus(unsigned long flags)
 {
@@ -455,13 +455,16 @@ __tsb_context_switch:
 	.type	copy_tsb,#function
 copy_tsb:		/* %o0=old_tsb_base, %o1=old_tsb_size
			 * %o2=new_tsb_base, %o3=new_tsb_size
+			 * %o4=page_size_shift
			 */
 	sethi		%uhi(TSB_PASS_BITS), %g7
 	srlx		%o3, 4, %o3
-	add		%o0, %o1, %g1	/* end of old tsb */
+	add		%o0, %o1, %o1	/* end of old tsb */
 	sllx		%g7, 32, %g7
 	sub		%o3, 1, %o3	/* %o3 == new tsb hash mask */
 
+	mov		%o4, %g1	/* page_size_shift */
+
 661:	prefetcha	[%o0] ASI_N, #one_read
 	.section	.tsb_phys_patch, "ax"
 	.word		661b
@@ -486,9 +489,9 @@ copy_tsb:		/* %o0=old_tsb_base, %o1=old_tsb_size
 	/* This can definitely be computed faster... */
 	srlx		%o0, 4, %o5	/* Build index */
 	and		%o5, 511, %o5	/* Mask index */
-	sllx		%o5, PAGE_SHIFT, %o5 /* Put into vaddr position */
+	sllx		%o5, %g1, %o5	/* Put into vaddr position */
 	or		%o4, %o5, %o4	/* Full VADDR. */
-	srlx		%o4, PAGE_SHIFT, %o4 /* Shift down to create index */
+	srlx		%o4, %g1, %o4	/* Shift down to create index */
 	and		%o4, %o3, %o4	/* Mask with new_tsb_nents-1 */
 	sllx		%o4, 4, %o4	/* Shift back up into tsb ent offset */
 	TSB_STORE(%o2 + %o4, %g2)	/* Store TAG */
@@ -496,7 +499,7 @@ copy_tsb:		/* %o0=old_tsb_base, %o1=old_tsb_size
 	TSB_STORE(%o2 + %o4, %g3)	/* Store TTE */
 
 80:	add		%o0, 16, %o0
-	cmp		%o0, %g1
+	cmp		%o0, %o1
 	bne,pt		%xcc, 90b
	 nop
 
@@ -50,7 +50,7 @@ tl0_resv03e:	BTRAP(0x3e) BTRAP(0x3f) BTRAP(0x40)
 tl0_irq1:	TRAP_IRQ(smp_call_function_client, 1)
 tl0_irq2:	TRAP_IRQ(smp_receive_signal_client, 2)
 tl0_irq3:	TRAP_IRQ(smp_penguin_jailcell, 3)
-tl0_irq4:	TRAP_IRQ(smp_new_mmu_context_version_client, 4)
+tl0_irq4:	BTRAP(0x44)
 #else
 tl0_irq1:	BTRAP(0x41)
 tl0_irq2:	BTRAP(0x42)
@@ -302,13 +302,16 @@ static struct vio_dev *vio_create_one(struct mdesc_handle *hp, u64 mp,
 	if (!id) {
 		dev_set_name(&vdev->dev, "%s", bus_id_name);
 		vdev->dev_no = ~(u64)0;
+		vdev->id = ~(u64)0;
 	} else if (!cfg_handle) {
 		dev_set_name(&vdev->dev, "%s-%llu", bus_id_name, *id);
 		vdev->dev_no = *id;
+		vdev->id = ~(u64)0;
 	} else {
 		dev_set_name(&vdev->dev, "%s-%llu-%llu", bus_id_name,
			     *cfg_handle, *id);
 		vdev->dev_no = *cfg_handle;
+		vdev->id = *id;
 	}
 
 	vdev->dev.parent = parent;
@@ -351,27 +354,84 @@ static void vio_add(struct mdesc_handle *hp, u64 node)
 	(void) vio_create_one(hp, node, &root_vdev->dev);
 }
 
+struct vio_md_node_query {
+	const char *type;
+	u64 dev_no;
+	u64 id;
+};
+
 static int vio_md_node_match(struct device *dev, void *arg)
 {
+	struct vio_md_node_query *query = (struct vio_md_node_query *) arg;
 	struct vio_dev *vdev = to_vio_dev(dev);
 
-	if (vdev->mp == (u64) arg)
-		return 1;
+	if (vdev->dev_no != query->dev_no)
+		return 0;
+	if (vdev->id != query->id)
+		return 0;
+	if (strcmp(vdev->type, query->type))
+		return 0;
 
-	return 0;
+	return 1;
 }
 
 static void vio_remove(struct mdesc_handle *hp, u64 node)
 {
+	const char *type;
+	const u64 *id, *cfg_handle;
+	u64 a;
+	struct vio_md_node_query query;
 	struct device *dev;
 
-	dev = device_find_child(&root_vdev->dev, (void *) node,
+	type = mdesc_get_property(hp, node, "device-type", NULL);
+	if (!type) {
+		type = mdesc_get_property(hp, node, "name", NULL);
+		if (!type)
+			type = mdesc_node_name(hp, node);
+	}
+
+	query.type = type;
+
+	id = mdesc_get_property(hp, node, "id", NULL);
+	cfg_handle = NULL;
+	mdesc_for_each_arc(a, hp, node, MDESC_ARC_TYPE_BACK) {
+		u64 target;
+
+		target = mdesc_arc_target(hp, a);
+		cfg_handle = mdesc_get_property(hp, target,
+						"cfg-handle", NULL);
+		if (cfg_handle)
+			break;
+	}
+
+	if (!id) {
+		query.dev_no = ~(u64)0;
+		query.id = ~(u64)0;
+	} else if (!cfg_handle) {
+		query.dev_no = *id;
+		query.id = ~(u64)0;
+	} else {
+		query.dev_no = *cfg_handle;
+		query.id = *id;
+	}
+
+	dev = device_find_child(&root_vdev->dev, &query,
				vio_md_node_match);
 	if (dev) {
 		printk(KERN_INFO "VIO: Removing device %s\n", dev_name(dev));
 
 		device_unregister(dev);
 		put_device(dev);
+	} else {
+		if (!id)
+			printk(KERN_ERR "VIO: Removed unknown %s node.\n",
+			       type);
+		else if (!cfg_handle)
+			printk(KERN_ERR "VIO: Removed unknown %s node %llu.\n",
+			       type, *id);
+		else
+			printk(KERN_ERR "VIO: Removed unknown %s node %llu-%llu.\n",
+			       type, *cfg_handle, *id);
 	}
 }
 
@@ -15,6 +15,7 @@ lib-$(CONFIG_SPARC32) += copy_user.o locks.o
 lib-$(CONFIG_SPARC64) += atomic_64.o
 lib-$(CONFIG_SPARC32) += lshrdi3.o ashldi3.o
 lib-$(CONFIG_SPARC32) += muldi3.o bitext.o cmpdi2.o
+lib-$(CONFIG_SPARC64) += multi3.o
 
 lib-$(CONFIG_SPARC64) += copy_page.o clear_page.o bzero.o
 lib-$(CONFIG_SPARC64) += csum_copy.o csum_copy_from_user.o csum_copy_to_user.o
new file: arch/sparc/lib/multi3.S (35 lines)
@@ -0,0 +1,35 @@
+#include <linux/linkage.h>
+#include <asm/export.h>
+
+	.text
+	.align	4
+ENTRY(__multi3) /* %o0 = u, %o1 = v */
+	mov	%o1, %g1
+	srl	%o3, 0, %g4
+	mulx	%g4, %g1, %o1
+	srlx	%g1, 0x20, %g3
+	mulx	%g3, %g4, %g5
+	sllx	%g5, 0x20, %o5
+	srl	%g1, 0, %g4
+	sub	%o1, %o5, %o5
+	srlx	%o5, 0x20, %o5
+	addcc	%g5, %o5, %g5
+	srlx	%o3, 0x20, %o5
+	mulx	%g4, %o5, %g4
+	mulx	%g3, %o5, %o5
+	sethi	%hi(0x80000000), %g3
+	addcc	%g5, %g4, %g5
+	srlx	%g5, 0x20, %g5
+	add	%g3, %g3, %g3
+	movcc	%xcc, %g0, %g3
+	addcc	%o5, %g5, %o5
+	sllx	%g4, 0x20, %g4
+	add	%o1, %g4, %o1
+	add	%o5, %g3, %g2
+	mulx	%g1, %o2, %g1
+	add	%g1, %g2, %g1
+	mulx	%o0, %o3, %o0
+	retl
+	 add	%g1, %o0, %o0
+ENDPROC(__multi3)
+EXPORT_SYMBOL(__multi3)
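Aside (illustration only): __multi3 is the libgcc-style helper for a full 128-bit multiply built out of 64-bit operations. A hedged C sketch of what such a helper computes, assuming a compiler that provides unsigned __int128 for the single widening 64x64->128 partial product (the assembly above instead builds that product from 32-bit halves):

#include <stdint.h>

typedef struct { uint64_t hi, lo; } u128;

/* r = u * v (mod 2^128). Only u.lo * v.lo needs a widening multiply;
 * the cross terms u.hi*v.lo and u.lo*v.hi can only affect the high word. */
static u128 mul128_sketch(u128 u, u128 v)
{
	unsigned __int128 p = (unsigned __int128)u.lo * v.lo;
	u128 r;

	r.lo = (uint64_t)p;
	r.hi = (uint64_t)(p >> 64) + u.hi * v.lo + u.lo * v.hi;
	return r;
}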
@ -358,7 +358,8 @@ static int __init setup_hugepagesz(char *string)
|
|||||||
}
|
}
|
||||||
|
|
||||||
if ((hv_pgsz_mask & cpu_pgsz_mask) == 0U) {
|
if ((hv_pgsz_mask & cpu_pgsz_mask) == 0U) {
|
||||||
pr_warn("hugepagesz=%llu not supported by MMU.\n",
|
hugetlb_bad_size();
|
||||||
|
pr_err("hugepagesz=%llu not supported by MMU.\n",
|
||||||
hugepage_size);
|
hugepage_size);
|
||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
@ -706,10 +707,58 @@ EXPORT_SYMBOL(__flush_dcache_range);
|
|||||||
|
|
||||||
/* get_new_mmu_context() uses "cache + 1". */
|
/* get_new_mmu_context() uses "cache + 1". */
|
||||||
DEFINE_SPINLOCK(ctx_alloc_lock);
|
DEFINE_SPINLOCK(ctx_alloc_lock);
|
||||||
unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
|
unsigned long tlb_context_cache = CTX_FIRST_VERSION;
|
||||||
#define MAX_CTX_NR (1UL << CTX_NR_BITS)
|
#define MAX_CTX_NR (1UL << CTX_NR_BITS)
|
||||||
#define CTX_BMAP_SLOTS BITS_TO_LONGS(MAX_CTX_NR)
|
#define CTX_BMAP_SLOTS BITS_TO_LONGS(MAX_CTX_NR)
|
||||||
DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR);
|
DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR);
|
||||||
|
DEFINE_PER_CPU(struct mm_struct *, per_cpu_secondary_mm) = {0};
|
||||||
|
|
||||||
|
static void mmu_context_wrap(void)
|
||||||
|
{
|
||||||
|
unsigned long old_ver = tlb_context_cache & CTX_VERSION_MASK;
|
||||||
|
unsigned long new_ver, new_ctx, old_ctx;
|
||||||
|
struct mm_struct *mm;
|
||||||
|
int cpu;
|
||||||
|
|
||||||
|
bitmap_zero(mmu_context_bmap, 1 << CTX_NR_BITS);
|
||||||
|
|
||||||
|
/* Reserve kernel context */
|
||||||
|
set_bit(0, mmu_context_bmap);
|
||||||
|
|
||||||
|
new_ver = (tlb_context_cache & CTX_VERSION_MASK) + CTX_FIRST_VERSION;
|
||||||
|
if (unlikely(new_ver == 0))
|
||||||
|
new_ver = CTX_FIRST_VERSION;
|
||||||
|
tlb_context_cache = new_ver;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Make sure that any new mm that are added into per_cpu_secondary_mm,
|
||||||
|
* are going to go through get_new_mmu_context() path.
|
||||||
|
*/
|
||||||
|
mb();
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Updated versions to current on those CPUs that had valid secondary
|
||||||
|
* contexts
|
||||||
|
*/
|
||||||
|
for_each_online_cpu(cpu) {
|
||||||
|
/*
|
||||||
|
* If a new mm is stored after we took this mm from the array,
|
||||||
|
* it will go into get_new_mmu_context() path, because we
|
||||||
|
* already bumped the version in tlb_context_cache.
|
||||||
|
*/
|
||||||
|
mm = per_cpu(per_cpu_secondary_mm, cpu);
|
||||||
|
|
||||||
|
if (unlikely(!mm || mm == &init_mm))
|
||||||
|
continue;
|
||||||
|
|
||||||
|
old_ctx = mm->context.sparc64_ctx_val;
|
||||||
|
if (likely((old_ctx & CTX_VERSION_MASK) == old_ver)) {
|
||||||
|
new_ctx = (old_ctx & ~CTX_VERSION_MASK) | new_ver;
|
||||||
|
set_bit(new_ctx & CTX_NR_MASK, mmu_context_bmap);
|
||||||
|
mm->context.sparc64_ctx_val = new_ctx;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
/* Caller does TLB context flushing on local CPU if necessary.
|
/* Caller does TLB context flushing on local CPU if necessary.
|
||||||
* The caller also ensures that CTX_VALID(mm->context) is false.
|
* The caller also ensures that CTX_VALID(mm->context) is false.
|
||||||
@ -725,48 +774,30 @@ void get_new_mmu_context(struct mm_struct *mm)
|
|||||||
{
|
{
|
||||||
unsigned long ctx, new_ctx;
|
unsigned long ctx, new_ctx;
|
||||||
unsigned long orig_pgsz_bits;
|
unsigned long orig_pgsz_bits;
|
||||||
int new_version;
|
|
||||||
|
|
||||||
spin_lock(&ctx_alloc_lock);
|
spin_lock(&ctx_alloc_lock);
|
||||||
|
retry:
|
||||||
|
/* wrap might have happened, test again if our context became valid */
|
||||||
|
if (unlikely(CTX_VALID(mm->context)))
|
||||||
|
goto out;
|
||||||
orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
|
orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
|
||||||
ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
|
ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
|
||||||
new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
|
new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
|
||||||
new_version = 0;
|
|
||||||
if (new_ctx >= (1 << CTX_NR_BITS)) {
|
if (new_ctx >= (1 << CTX_NR_BITS)) {
|
||||||
new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
|
new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
|
||||||
if (new_ctx >= ctx) {
|
if (new_ctx >= ctx) {
|
||||||
int i;
|
mmu_context_wrap();
|
||||||
new_ctx = (tlb_context_cache & CTX_VERSION_MASK) +
|
goto retry;
|
||||||
CTX_FIRST_VERSION;
|
|
||||||
if (new_ctx == 1)
|
|
||||||
new_ctx = CTX_FIRST_VERSION;
|
|
||||||
|
|
||||||
/* Don't call memset, for 16 entries that's just
|
|
||||||
* plain silly...
|
|
||||||
*/
|
|
||||||
mmu_context_bmap[0] = 3;
|
|
||||||
mmu_context_bmap[1] = 0;
|
|
||||||
mmu_context_bmap[2] = 0;
|
|
||||||
mmu_context_bmap[3] = 0;
|
|
||||||
for (i = 4; i < CTX_BMAP_SLOTS; i += 4) {
|
|
||||||
mmu_context_bmap[i + 0] = 0;
|
|
||||||
mmu_context_bmap[i + 1] = 0;
|
|
||||||
mmu_context_bmap[i + 2] = 0;
|
|
||||||
mmu_context_bmap[i + 3] = 0;
|
|
||||||
}
|
|
||||||
new_version = 1;
|
|
||||||
goto out;
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
if (mm->context.sparc64_ctx_val)
|
||||||
|
cpumask_clear(mm_cpumask(mm));
|
||||||
mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63));
|
mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63));
|
||||||
new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
|
new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
|
||||||
out:
|
|
||||||
tlb_context_cache = new_ctx;
|
tlb_context_cache = new_ctx;
|
||||||
mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
|
mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
|
||||||
|
out:
|
||||||
spin_unlock(&ctx_alloc_lock);
|
spin_unlock(&ctx_alloc_lock);
|
||||||
|
|
||||||
if (unlikely(new_version))
|
|
||||||
smp_new_mmu_context_version();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static int numa_enabled = 1;
|
static int numa_enabled = 1;
|
||||||
|
@ -496,7 +496,8 @@ retry_tsb_alloc:
|
|||||||
extern void copy_tsb(unsigned long old_tsb_base,
|
extern void copy_tsb(unsigned long old_tsb_base,
|
||||||
unsigned long old_tsb_size,
|
unsigned long old_tsb_size,
|
||||||
unsigned long new_tsb_base,
|
unsigned long new_tsb_base,
|
||||||
unsigned long new_tsb_size);
|
unsigned long new_tsb_size,
|
||||||
|
unsigned long page_size_shift);
|
||||||
unsigned long old_tsb_base = (unsigned long) old_tsb;
|
unsigned long old_tsb_base = (unsigned long) old_tsb;
|
||||||
unsigned long new_tsb_base = (unsigned long) new_tsb;
|
unsigned long new_tsb_base = (unsigned long) new_tsb;
|
||||||
|
|
||||||
@ -504,7 +505,9 @@ retry_tsb_alloc:
|
|||||||
old_tsb_base = __pa(old_tsb_base);
|
old_tsb_base = __pa(old_tsb_base);
|
||||||
new_tsb_base = __pa(new_tsb_base);
|
new_tsb_base = __pa(new_tsb_base);
|
||||||
}
|
}
|
||||||
copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size);
|
copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size,
|
||||||
|
tsb_index == MM_TSB_BASE ?
|
||||||
|
PAGE_SHIFT : REAL_HPAGE_SHIFT);
|
||||||
}
|
}
|
||||||
|
|
||||||
mm->context.tsb_block[tsb_index].tsb = new_tsb;
|
mm->context.tsb_block[tsb_index].tsb = new_tsb;
|
||||||
|
@ -971,11 +971,6 @@ xcall_capture:
|
|||||||
wr %g0, (1 << PIL_SMP_CAPTURE), %set_softint
|
wr %g0, (1 << PIL_SMP_CAPTURE), %set_softint
|
||||||
retry
|
retry
|
||||||
|
|
||||||
.globl xcall_new_mmu_context_version
|
|
||||||
xcall_new_mmu_context_version:
|
|
||||||
wr %g0, (1 << PIL_SMP_CTX_NEW_VERSION), %set_softint
|
|
||||||
retry
|
|
||||||
|
|
||||||
#ifdef CONFIG_KGDB
|
#ifdef CONFIG_KGDB
|
||||||
.globl xcall_kgdb_capture
|
.globl xcall_kgdb_capture
|
||||||
xcall_kgdb_capture:
|
xcall_kgdb_capture:
|
||||||
|
@ -360,7 +360,7 @@ config SMP
|
|||||||
Management" code will be disabled if you say Y here.
|
Management" code will be disabled if you say Y here.
|
||||||
|
|
||||||
See also <file:Documentation/x86/i386/IO-APIC.txt>,
|
See also <file:Documentation/x86/i386/IO-APIC.txt>,
|
||||||
<file:Documentation/nmi_watchdog.txt> and the SMP-HOWTO available at
|
<file:Documentation/lockup-watchdogs.txt> and the SMP-HOWTO available at
|
||||||
<http://www.tldp.org/docs.html#howto>.
|
<http://www.tldp.org/docs.html#howto>.
|
||||||
|
|
||||||
If you don't know what to do here, say N.
|
If you don't know what to do here, say N.
|
||||||
@ -2776,10 +2776,6 @@ config COMPAT_FOR_U64_ALIGNMENT
|
|||||||
config SYSVIPC_COMPAT
|
config SYSVIPC_COMPAT
|
||||||
def_bool y
|
def_bool y
|
||||||
depends on SYSVIPC
|
depends on SYSVIPC
|
||||||
|
|
||||||
config KEYS_COMPAT
|
|
||||||
def_bool y
|
|
||||||
depends on KEYS
|
|
||||||
endif
|
endif
|
||||||
|
|
||||||
endmenu
|
endmenu
|
||||||
|
@ -94,7 +94,7 @@ vmlinux-objs-$(CONFIG_EFI_MIXED) += $(obj)/efi_thunk_$(BITS).o
 quiet_cmd_check_data_rel = DATAREL $@
 define cmd_check_data_rel
	for obj in $(filter %.o,$^); do \
-		readelf -S $$obj | grep -qF .rel.local && { \
+		${CROSS_COMPILE}readelf -S $$obj | grep -qF .rel.local && { \
		echo "error: $$obj has data relocations!" >&2; \
		exit 1; \
	} || true; \

@ -251,6 +251,23 @@ ENTRY(__switch_to_asm)
	jmp	__switch_to
 END(__switch_to_asm)

+/*
+ * The unwinder expects the last frame on the stack to always be at the same
+ * offset from the end of the page, which allows it to validate the stack.
+ * Calling schedule_tail() directly would break that convention because its an
+ * asmlinkage function so its argument has to be pushed on the stack.  This
+ * wrapper creates a proper "end of stack" frame header before the call.
+ */
+ENTRY(schedule_tail_wrapper)
+	FRAME_BEGIN
+
+	pushl	%eax
+	call	schedule_tail
+	popl	%eax
+
+	FRAME_END
+	ret
+ENDPROC(schedule_tail_wrapper)
 /*
  * A newly forked process directly context switches into this address.
  *

@ -259,24 +276,15 @@ END(__switch_to_asm)
  * edi: kernel thread arg
  */
 ENTRY(ret_from_fork)
-	FRAME_BEGIN		/* help unwinder find end of stack */
-
-	/*
-	 * schedule_tail() is asmlinkage so we have to put its 'prev' argument
-	 * on the stack.
-	 */
-	pushl	%eax
-	call	schedule_tail
-	popl	%eax
+	call	schedule_tail_wrapper

	testl	%ebx, %ebx
	jnz	1f		/* kernel threads are uncommon */

 2:
	/* When we fork, we trace the syscall return in the child, too. */
-	leal	FRAME_OFFSET(%esp), %eax
+	movl	%esp, %eax
	call	syscall_return_slowpath
-	FRAME_END
	jmp	restore_all

	/* kernel thread */

@ -36,7 +36,6 @@
 #include <asm/smap.h>
 #include <asm/pgtable_types.h>
 #include <asm/export.h>
-#include <asm/frame.h>
 #include <linux/err.h>

 .code64

@ -406,7 +405,6 @@ END(__switch_to_asm)
  * r12: kernel thread arg
  */
 ENTRY(ret_from_fork)
-	FRAME_BEGIN			/* help unwinder find end of stack */
	movq	%rax, %rdi
	call	schedule_tail		/* rdi: 'prev' task parameter */

@ -414,11 +412,10 @@ ENTRY(ret_from_fork)
	jnz	1f		/* kernel threads are uncommon */

 2:
-	leaq	FRAME_OFFSET(%rsp),%rdi	/* pt_regs pointer */
+	movq	%rsp, %rdi
	call	syscall_return_slowpath	/* returns with IRQs disabled */
	TRACE_IRQS_ON			/* user mode is traced as IRQS on */
	SWAPGS
-	FRAME_END
	jmp	restore_regs_and_iret

 1:

@ -266,6 +266,7 @@ static inline int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *s
 #endif

 int mce_available(struct cpuinfo_x86 *c);
+bool mce_is_memory_error(struct mce *m);

 DECLARE_PER_CPU(unsigned, mce_exception_count);
 DECLARE_PER_CPU(unsigned, mce_poll_count);

@ -409,8 +409,13 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start,
		memcpy(insnbuf, replacement, a->replacementlen);
		insnbuf_sz = a->replacementlen;

-		/* 0xe8 is a relative jump; fix the offset. */
-		if (*insnbuf == 0xe8 && a->replacementlen == 5) {
+		/*
+		 * 0xe8 is a relative jump; fix the offset.
+		 *
+		 * Instruction length is checked before the opcode to avoid
+		 * accessing uninitialized bytes for zero-length replacements.
+		 */
+		if (a->replacementlen == 5 && *insnbuf == 0xe8) {
			*(s32 *)(insnbuf + 1) += replacement - instr;
			DPRINTK("Fix CALL offset: 0x%x, CALL 0x%lx",
				*(s32 *)(insnbuf + 1),

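For readers unfamiliar with the fix-up in the alternatives hunk above, here is a minimal userspace C sketch of the same displacement arithmetic; the buffer contents and the two addresses are invented for illustration and this is not kernel code:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        /* A 5-byte relative CALL (opcode 0xe8) with displacement 0x10. */
        uint8_t insnbuf[5] = { 0xe8, 0x10, 0x00, 0x00, 0x00 };
        uintptr_t replacement = 0x1000;  /* where the bytes were assembled */
        uintptr_t instr       = 0x2000;  /* where they will actually run   */
        int32_t disp;

        memcpy(&disp, insnbuf + 1, sizeof(disp));
        /* The target is encoded relative to the end of the instruction. */
        uintptr_t target = replacement + 5 + disp;

        /* Same adjustment as the hunk above: shift the offset by (src - dst). */
        disp += (int32_t)(replacement - instr);
        memcpy(insnbuf + 1, &disp, sizeof(disp));

        printf("old target 0x%lx, adjusted call resolves to 0x%lx\n",
               (unsigned long)target, (unsigned long)(instr + 5 + disp));
        return 0;
    }

Both lines print the same address, which is the whole point of the fix-up: the copied CALL keeps reaching its original target from its new location.
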
@ -255,6 +255,7 @@ static void init_cyrix(struct cpuinfo_x86 *c)
		break;

	case 4: /* MediaGX/GXm or Geode GXM/GXLV/GX1 */
+	case 11: /* GX1 with inverted Device ID */
 #ifdef CONFIG_PCI
	{
		u32 vendor, device;

@ -499,16 +499,14 @@ static int mce_usable_address(struct mce *m)
	return 1;
 }

-static bool memory_error(struct mce *m)
+bool mce_is_memory_error(struct mce *m)
 {
-	struct cpuinfo_x86 *c = &boot_cpu_data;
-
-	if (c->x86_vendor == X86_VENDOR_AMD) {
+	if (m->cpuvendor == X86_VENDOR_AMD) {
		/* ErrCodeExt[20:16] */
		u8 xec = (m->status >> 16) & 0x1f;

		return (xec == 0x0 || xec == 0x8);
-	} else if (c->x86_vendor == X86_VENDOR_INTEL) {
+	} else if (m->cpuvendor == X86_VENDOR_INTEL) {
		/*
		 * Intel SDM Volume 3B - 15.9.2 Compound Error Codes
		 *

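The AMD branch above boils down to a small bit-field test; a standalone C illustration (the status values are invented):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool amd_xec_says_memory_error(uint64_t status)
    {
        uint8_t xec = (status >> 16) & 0x1f;   /* ErrCodeExt[20:16] */

        return xec == 0x0 || xec == 0x8;
    }

    int main(void)
    {
        printf("%d\n", amd_xec_says_memory_error(0x8ULL << 16));   /* 1 */
        printf("%d\n", amd_xec_says_memory_error(0x13ULL << 16));  /* 0 */
        return 0;
    }
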
@ -529,6 +527,7 @@ static bool memory_error(struct mce *m)

	return false;
 }
+EXPORT_SYMBOL_GPL(mce_is_memory_error);

 static bool cec_add_mce(struct mce *m)
 {

@ -536,7 +535,7 @@ static bool cec_add_mce(struct mce *m)
		return false;

	/* We eat only correctable DRAM errors with usable addresses. */
-	if (memory_error(m) &&
+	if (mce_is_memory_error(m) &&
	    !(m->status & MCI_STATUS_UC) &&
	    mce_usable_address(m))
		if (!cec_add_elem(m->addr >> PAGE_SHIFT))

@ -713,7 +712,7 @@ bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b)

		severity = mce_severity(&m, mca_cfg.tolerant, NULL, false);

-		if (severity == MCE_DEFERRED_SEVERITY && memory_error(&m))
+		if (severity == MCE_DEFERRED_SEVERITY && mce_is_memory_error(&m))
			if (m.status & MCI_STATUS_ADDRV)
				m.severity = severity;

@ -320,7 +320,7 @@ void load_ucode_amd_ap(unsigned int cpuid_1_eax)
 }

 static enum ucode_state
-load_microcode_amd(int cpu, u8 family, const u8 *data, size_t size);
+load_microcode_amd(bool save, u8 family, const u8 *data, size_t size);

 int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax)
 {

@ -338,8 +338,7 @@ int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax)
	if (!desc.mc)
		return -EINVAL;

-	ret = load_microcode_amd(smp_processor_id(), x86_family(cpuid_1_eax),
-				 desc.data, desc.size);
+	ret = load_microcode_amd(true, x86_family(cpuid_1_eax), desc.data, desc.size);
	if (ret != UCODE_OK)
		return -EINVAL;

@ -675,7 +674,7 @@ static enum ucode_state __load_microcode_amd(u8 family, const u8 *data,
 }

 static enum ucode_state
-load_microcode_amd(int cpu, u8 family, const u8 *data, size_t size)
+load_microcode_amd(bool save, u8 family, const u8 *data, size_t size)
 {
	enum ucode_state ret;

@ -689,8 +688,8 @@ load_microcode_amd(int cpu, u8 family, const u8 *data, size_t size)

 #ifdef CONFIG_X86_32
	/* save BSP's matching patch for early load */
-	if (cpu_data(cpu).cpu_index == boot_cpu_data.cpu_index) {
-		struct ucode_patch *p = find_patch(cpu);
+	if (save) {
+		struct ucode_patch *p = find_patch(0);
		if (p) {
			memset(amd_ucode_patch, 0, PATCH_MAX_SIZE);
			memcpy(amd_ucode_patch, p->data, min_t(u32, ksize(p->data),

@ -722,11 +721,12 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device,
 {
	char fw_name[36] = "amd-ucode/microcode_amd.bin";
	struct cpuinfo_x86 *c = &cpu_data(cpu);
+	bool bsp = c->cpu_index == boot_cpu_data.cpu_index;
	enum ucode_state ret = UCODE_NFOUND;
	const struct firmware *fw;

	/* reload ucode container only on the boot cpu */
-	if (!refresh_fw || c->cpu_index != boot_cpu_data.cpu_index)
+	if (!refresh_fw || !bsp)
		return UCODE_OK;

	if (c->x86 >= 0x15)

@ -743,7 +743,7 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device,
		goto fw_release;
	}

-	ret = load_microcode_amd(cpu, c->x86, fw->data, fw->size);
+	ret = load_microcode_amd(bsp, c->x86, fw->data, fw->size);

 fw_release:
	release_firmware(fw);

@ -619,6 +619,9 @@ int __init save_microcode_in_initrd_intel(void)

	show_saved_mc();

+	/* initrd is going away, clear patch ptr. */
+	intel_ucode_patch = NULL;
+
	return 0;
 }

@ -689,8 +689,12 @@ static inline void *alloc_tramp(unsigned long size)
 {
	return module_alloc(size);
 }
-static inline void tramp_free(void *tramp)
+static inline void tramp_free(void *tramp, int size)
 {
+	int npages = PAGE_ALIGN(size) >> PAGE_SHIFT;
+
+	set_memory_nx((unsigned long)tramp, npages);
+	set_memory_rw((unsigned long)tramp, npages);
	module_memfree(tramp);
 }
 #else

@ -699,7 +703,7 @@ static inline void *alloc_tramp(unsigned long size)
 {
	return NULL;
 }
-static inline void tramp_free(void *tramp) { }
+static inline void tramp_free(void *tramp, int size) { }
 #endif

 /* Defined as markers to the end of the ftrace default trampolines */

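The new size argument exists so tramp_free() can work out how many pages to flip back before freeing; a quick userspace sketch of that page-count arithmetic (PAGE_SHIFT is assumed to be 12 here, purely for illustration):

    #include <stdio.h>

    #define PAGE_SHIFT   12
    #define PAGE_SIZE    (1UL << PAGE_SHIFT)
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

    int main(void)
    {
        unsigned long sizes[] = { 1, 4096, 4097, 12288 };

        for (int i = 0; i < 4; i++)
            printf("size %5lu -> %lu page(s)\n",
                   sizes[i], PAGE_ALIGN(sizes[i]) >> PAGE_SHIFT);
        return 0;
    }

A 1-byte or 4096-byte trampoline occupies one page, 4097 bytes occupy two, and so on; that count is what the set_memory_*() calls in the hunk above operate on.
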
@ -771,7 +775,7 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
	/* Copy ftrace_caller onto the trampoline memory */
	ret = probe_kernel_read(trampoline, (void *)start_offset, size);
	if (WARN_ON(ret < 0)) {
-		tramp_free(trampoline);
+		tramp_free(trampoline, *tramp_size);
		return 0;
	}

@ -797,7 +801,7 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)

	/* Are we pointing to the reference? */
	if (WARN_ON(memcmp(op_ptr.op, op_ref, 3) != 0)) {
-		tramp_free(trampoline);
+		tramp_free(trampoline, *tramp_size);
		return 0;
	}

@ -839,7 +843,7 @@ void arch_ftrace_update_trampoline(struct ftrace_ops *ops)
	unsigned long offset;
	unsigned long ip;
	unsigned int size;
-	int ret;
+	int ret, npages;

	if (ops->trampoline) {
		/*

@ -848,11 +852,14 @@ void arch_ftrace_update_trampoline(struct ftrace_ops *ops)
		 */
		if (!(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
			return;
+		npages = PAGE_ALIGN(ops->trampoline_size) >> PAGE_SHIFT;
+		set_memory_rw(ops->trampoline, npages);
	} else {
		ops->trampoline = create_trampoline(ops, &size);
		if (!ops->trampoline)
			return;
		ops->trampoline_size = size;
+		npages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	}

	offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS);

@ -863,6 +870,7 @@ void arch_ftrace_update_trampoline(struct ftrace_ops *ops)
	/* Do a safe modify in case the trampoline is executing */
	new = ftrace_call_replace(ip, (unsigned long)func);
	ret = update_ftrace_func(ip, new);
+	set_memory_ro(ops->trampoline, npages);

	/* The update should never fail */
	WARN_ON(ret);

@ -939,7 +947,7 @@ void arch_ftrace_trampoline_free(struct ftrace_ops *ops)
	if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
		return;

-	tramp_free((void *)ops->trampoline);
+	tramp_free((void *)ops->trampoline, ops->trampoline_size);
	ops->trampoline = 0;
 }

@ -52,6 +52,7 @@
 #include <linux/ftrace.h>
 #include <linux/frame.h>
 #include <linux/kasan.h>
+#include <linux/moduleloader.h>

 #include <asm/text-patching.h>
 #include <asm/cacheflush.h>

@ -417,6 +418,14 @@ static void prepare_boost(struct kprobe *p, struct insn *insn)
	}
 }

+/* Recover page to RW mode before releasing it */
+void free_insn_page(void *page)
+{
+	set_memory_nx((unsigned long)page & PAGE_MASK, 1);
+	set_memory_rw((unsigned long)page & PAGE_MASK, 1);
+	module_memfree(page);
+}
+
 static int arch_copy_kprobe(struct kprobe *p)
 {
	struct insn insn;

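The free_insn_page() addition above follows the "make it writable again before handing it back" pattern. A rough userspace analogue, using mmap/mprotect instead of the kernel's set_memory_*() helpers (not the kernel code itself):

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        long psize = sysconf(_SC_PAGESIZE);
        void *page = mmap(NULL, psize, PROT_READ | PROT_WRITE,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (page == MAP_FAILED)
            return 1;

        memset(page, 0x90, 16);                        /* pretend to emit code */
        mprotect(page, psize, PROT_READ | PROT_EXEC);  /* lock the page down   */

        /* The free_insn_page() step: back to RW before releasing the memory. */
        mprotect(page, psize, PROT_READ | PROT_WRITE);
        munmap(page, psize);
        puts("page recycled");
        return 0;
    }
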
@ -161,8 +161,8 @@ void kvm_async_pf_task_wait(u32 token)
			 */
			rcu_irq_exit();
			native_safe_halt();
-			rcu_irq_enter();
			local_irq_disable();
+			rcu_irq_enter();
		}
	}
	if (!n.halted)

@ -78,7 +78,7 @@ void __show_regs(struct pt_regs *regs, int all)

	printk(KERN_DEFAULT "EIP: %pS\n", (void *)regs->ip);
	printk(KERN_DEFAULT "EFLAGS: %08lx CPU: %d\n", regs->flags,
-		smp_processor_id());
+		raw_smp_processor_id());

	printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
		regs->ax, regs->bx, regs->cx, regs->dx);

@ -980,8 +980,6 @@ void __init setup_arch(char **cmdline_p)
	 */
	x86_configure_nx();

-	simple_udelay_calibration();
-
	parse_early_param();

 #ifdef CONFIG_MEMORY_HOTPLUG

@ -1041,6 +1039,8 @@ void __init setup_arch(char **cmdline_p)
	 */
	init_hypervisor_platform();

+	simple_udelay_calibration();
+
	x86_init.resources.probe_roms();

	/* after parse_early_param, so could debug it */

|
|||||||
return (unsigned long *)task_pt_regs(state->task) - 2;
|
return (unsigned long *)task_pt_regs(state->task) - 2;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static bool is_last_frame(struct unwind_state *state)
|
||||||
|
{
|
||||||
|
return state->bp == last_frame(state);
|
||||||
|
}
|
||||||
|
|
||||||
#ifdef CONFIG_X86_32
|
#ifdef CONFIG_X86_32
|
||||||
#define GCC_REALIGN_WORDS 3
|
#define GCC_REALIGN_WORDS 3
|
||||||
#else
|
#else
|
||||||
@ -115,16 +120,15 @@ static inline unsigned long *last_aligned_frame(struct unwind_state *state)
|
|||||||
return last_frame(state) - GCC_REALIGN_WORDS;
|
return last_frame(state) - GCC_REALIGN_WORDS;
|
||||||
}
|
}
|
||||||
|
|
||||||
static bool is_last_task_frame(struct unwind_state *state)
|
static bool is_last_aligned_frame(struct unwind_state *state)
|
||||||
{
|
{
|
||||||
unsigned long *last_bp = last_frame(state);
|
unsigned long *last_bp = last_frame(state);
|
||||||
unsigned long *aligned_bp = last_aligned_frame(state);
|
unsigned long *aligned_bp = last_aligned_frame(state);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* We have to check for the last task frame at two different locations
|
* GCC can occasionally decide to realign the stack pointer and change
|
||||||
* because gcc can occasionally decide to realign the stack pointer and
|
* the offset of the stack frame in the prologue of a function called
|
||||||
* change the offset of the stack frame in the prologue of a function
|
* by head/entry code. Examples:
|
||||||
* called by head/entry code. Examples:
|
|
||||||
*
|
*
|
||||||
* <start_secondary>:
|
* <start_secondary>:
|
||||||
* push %edi
|
* push %edi
|
||||||
@ -141,11 +145,38 @@ static bool is_last_task_frame(struct unwind_state *state)
|
|||||||
* push %rbp
|
* push %rbp
|
||||||
* mov %rsp,%rbp
|
* mov %rsp,%rbp
|
||||||
*
|
*
|
||||||
* Note that after aligning the stack, it pushes a duplicate copy of
|
* After aligning the stack, it pushes a duplicate copy of the return
|
||||||
* the return address before pushing the frame pointer.
|
* address before pushing the frame pointer.
|
||||||
*/
|
*/
|
||||||
return (state->bp == last_bp ||
|
return (state->bp == aligned_bp && *(aligned_bp + 1) == *(last_bp + 1));
|
||||||
(state->bp == aligned_bp && *(aligned_bp+1) == *(last_bp+1)));
|
}
|
||||||
|
|
||||||
|
static bool is_last_ftrace_frame(struct unwind_state *state)
|
||||||
|
{
|
||||||
|
unsigned long *last_bp = last_frame(state);
|
||||||
|
unsigned long *last_ftrace_bp = last_bp - 3;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* When unwinding from an ftrace handler of a function called by entry
|
||||||
|
* code, the stack layout of the last frame is:
|
||||||
|
*
|
||||||
|
* bp
|
||||||
|
* parent ret addr
|
||||||
|
* bp
|
||||||
|
* function ret addr
|
||||||
|
* parent ret addr
|
||||||
|
* pt_regs
|
||||||
|
* -----------------
|
||||||
|
*/
|
||||||
|
return (state->bp == last_ftrace_bp &&
|
||||||
|
*state->bp == *(state->bp + 2) &&
|
||||||
|
*(state->bp + 1) == *(state->bp + 4));
|
||||||
|
}
|
||||||
|
|
||||||
|
static bool is_last_task_frame(struct unwind_state *state)
|
||||||
|
{
|
||||||
|
return is_last_frame(state) || is_last_aligned_frame(state) ||
|
||||||
|
is_last_ftrace_frame(state);
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
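The duplicate-value comparisons in is_last_ftrace_frame() above are easier to see in a toy model; the array below stands in for the stack slots sketched in the comment, the values are arbitrary, and the kernel's additional check that bp sits at the expected offset from the end of the stack is left out:

    #include <stdbool.h>
    #include <stdio.h>

    /* bp[0]=bp, bp[1]=parent ret, bp[2]=bp copy, bp[3]=function ret, bp[4]=parent ret copy */
    static bool looks_like_last_ftrace_frame(const unsigned long *bp)
    {
        return bp[0] == bp[2] && bp[1] == bp[4];
    }

    int main(void)
    {
        unsigned long frame[5] = { 0xb0, 0xaa, 0xb0, 0xcc, 0xaa };

        puts(looks_like_last_ftrace_frame(frame) ? "last frame" : "keep unwinding");
        return 0;
    }
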
@ -780,18 +780,20 @@ out:
 static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
 {
	struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
-	int j, nent = vcpu->arch.cpuid_nent;
+	struct kvm_cpuid_entry2 *ej;
+	int j = i;
+	int nent = vcpu->arch.cpuid_nent;

	e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
	/* when no next entry is found, the current entry[i] is reselected */
-	for (j = i + 1; ; j = (j + 1) % nent) {
-		struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j];
-		if (ej->function == e->function) {
-			ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
-			return j;
-		}
-	}
-	return 0; /* silence gcc, even though control never reaches here */
+	do {
+		j = (j + 1) % nent;
+		ej = &vcpu->arch.cpuid_entries[j];
+	} while (ej->function != e->function);
+
+	ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
+
+	return j;
 }

 /* find an entry with matching function, matching index (if needed), and that

|
|||||||
|
|
||||||
static void cancel_hv_timer(struct kvm_lapic *apic)
|
static void cancel_hv_timer(struct kvm_lapic *apic)
|
||||||
{
|
{
|
||||||
|
preempt_disable();
|
||||||
kvm_x86_ops->cancel_hv_timer(apic->vcpu);
|
kvm_x86_ops->cancel_hv_timer(apic->vcpu);
|
||||||
apic->lapic_timer.hv_timer_in_use = false;
|
apic->lapic_timer.hv_timer_in_use = false;
|
||||||
|
preempt_enable();
|
||||||
}
|
}
|
||||||
|
|
||||||
static bool start_hv_timer(struct kvm_lapic *apic)
|
static bool start_hv_timer(struct kvm_lapic *apic)
|
||||||
@ -1934,7 +1936,8 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
|
|||||||
for (i = 0; i < KVM_APIC_LVT_NUM; i++)
|
for (i = 0; i < KVM_APIC_LVT_NUM; i++)
|
||||||
kvm_lapic_set_reg(apic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED);
|
kvm_lapic_set_reg(apic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED);
|
||||||
apic_update_lvtt(apic);
|
apic_update_lvtt(apic);
|
||||||
if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_LINT0_REENABLED))
|
if (kvm_vcpu_is_reset_bsp(vcpu) &&
|
||||||
|
kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_LINT0_REENABLED))
|
||||||
kvm_lapic_set_reg(apic, APIC_LVT0,
|
kvm_lapic_set_reg(apic, APIC_LVT0,
|
||||||
SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT));
|
SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT));
|
||||||
apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0));
|
apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0));
|
||||||
|
@ -3698,12 +3698,15 @@ static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn)
|
|||||||
return kvm_setup_async_pf(vcpu, gva, kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch);
|
return kvm_setup_async_pf(vcpu, gva, kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch);
|
||||||
}
|
}
|
||||||
|
|
||||||
static bool can_do_async_pf(struct kvm_vcpu *vcpu)
|
bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu)
|
||||||
{
|
{
|
||||||
if (unlikely(!lapic_in_kernel(vcpu) ||
|
if (unlikely(!lapic_in_kernel(vcpu) ||
|
||||||
kvm_event_needs_reinjection(vcpu)))
|
kvm_event_needs_reinjection(vcpu)))
|
||||||
return false;
|
return false;
|
||||||
|
|
||||||
|
if (is_guest_mode(vcpu))
|
||||||
|
return false;
|
||||||
|
|
||||||
return kvm_x86_ops->interrupt_allowed(vcpu);
|
return kvm_x86_ops->interrupt_allowed(vcpu);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -3719,7 +3722,7 @@ static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
|
|||||||
if (!async)
|
if (!async)
|
||||||
return false; /* *pfn has correct page already */
|
return false; /* *pfn has correct page already */
|
||||||
|
|
||||||
if (!prefault && can_do_async_pf(vcpu)) {
|
if (!prefault && kvm_can_do_async_pf(vcpu)) {
|
||||||
trace_kvm_try_async_get_page(gva, gfn);
|
trace_kvm_try_async_get_page(gva, gfn);
|
||||||
if (kvm_find_async_pf_gfn(vcpu, gfn)) {
|
if (kvm_find_async_pf_gfn(vcpu, gfn)) {
|
||||||
trace_kvm_async_pf_doublefault(gva, gfn);
|
trace_kvm_async_pf_doublefault(gva, gfn);
|
||||||
|
@ -76,6 +76,7 @@ int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct);
|
|||||||
void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu);
|
void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu);
|
||||||
void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
|
void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
|
||||||
bool accessed_dirty);
|
bool accessed_dirty);
|
||||||
|
bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu);
|
||||||
|
|
||||||
static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
|
static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
|
||||||
{
|
{
|
||||||
|
Some files were not shown because too many files have changed in this diff.