Merge branch 'linus' into x86/urgent, to pick up dependent commit

We want to fix:

  0e11073247 ("x86/retpoline: Do the necessary fixup to the Zen3/4 srso return thunk for !SRSO")

So merge in Linus's latest into x86/urgent to have it available.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
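A minimal sketch of the merge workflow this message describes (the branch
names below are assumptions for illustration, not taken from the commit
itself):

    # on the tip tree's x86/urgent branch, with Linus's tree fetched as 'linus'
    git checkout x86/urgent
    git merge linus    # makes the to-be-fixed commit available in x86/urgent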
commit 5f2ca44ed2
 .mailmap | 5
@@ -20,6 +20,7 @@ Adam Oldham <oldhamca@gmail.com>
 Adam Radford <aradford@gmail.com>
 Adriana Reus <adi.reus@gmail.com> <adriana.reus@intel.com>
 Adrian Bunk <bunk@stusta.de>
+Ajay Kaher <ajay.kaher@broadcom.com> <akaher@vmware.com>
 Akhil P Oommen <quic_akhilpo@quicinc.com> <akhilpo@codeaurora.org>
 Alan Cox <alan@lxorguk.ukuu.org.uk>
 Alan Cox <root@hraefn.swansea.linux.org.uk>
@@ -36,6 +37,7 @@ Alexei Avshalom Lazar <quic_ailizaro@quicinc.com> <ailizaro@codeaurora.org>
 Alexei Starovoitov <ast@kernel.org> <alexei.starovoitov@gmail.com>
 Alexei Starovoitov <ast@kernel.org> <ast@fb.com>
 Alexei Starovoitov <ast@kernel.org> <ast@plumgrid.com>
+Alexey Makhalov <alexey.amakhalov@broadcom.com> <amakhalov@vmware.com>
 Alex Hung <alexhung@gmail.com> <alex.hung@canonical.com>
 Alex Shi <alexs@kernel.org> <alex.shi@intel.com>
 Alex Shi <alexs@kernel.org> <alex.shi@linaro.org>
@@ -110,6 +112,7 @@ Brendan Higgins <brendan.higgins@linux.dev> <brendanhiggins@google.com>
 Brian Avery <b.avery@hp.com>
 Brian King <brking@us.ibm.com>
 Brian Silverman <bsilver16384@gmail.com> <brian.silverman@bluerivertech.com>
+Bryan Tan <bryan-bt.tan@broadcom.com> <bryantan@vmware.com>
 Cai Huoqing <cai.huoqing@linux.dev> <caihuoqing@baidu.com>
 Can Guo <quic_cang@quicinc.com> <cang@codeaurora.org>
 Carl Huang <quic_cjhuang@quicinc.com> <cjhuang@codeaurora.org>
@@ -529,6 +532,7 @@ Rocky Liao <quic_rjliao@quicinc.com> <rjliao@codeaurora.org>
 Roman Gushchin <roman.gushchin@linux.dev> <guro@fb.com>
 Roman Gushchin <roman.gushchin@linux.dev> <guroan@gmail.com>
 Roman Gushchin <roman.gushchin@linux.dev> <klamm@yandex-team.ru>
+Ronak Doshi <ronak.doshi@broadcom.com> <doshir@vmware.com>
 Muchun Song <muchun.song@linux.dev> <songmuchun@bytedance.com>
 Muchun Song <muchun.song@linux.dev> <smuchun@gmail.com>
 Ross Zwisler <zwisler@kernel.org> <ross.zwisler@linux.intel.com>
@@ -651,6 +655,7 @@ Viresh Kumar <vireshk@kernel.org> <viresh.kumar@st.com>
 Viresh Kumar <vireshk@kernel.org> <viresh.linux@gmail.com>
 Viresh Kumar <viresh.kumar@linaro.org> <viresh.kumar@linaro.org>
 Viresh Kumar <viresh.kumar@linaro.org> <viresh.kumar@linaro.com>
+Vishnu Dasa <vishnu.dasa@broadcom.com> <vdasa@vmware.com>
 Vivek Aknurwar <quic_viveka@quicinc.com> <viveka@codeaurora.org>
 Vivien Didelot <vivien.didelot@gmail.com> <vivien.didelot@savoirfairelinux.com>
 Vlad Dogaru <ddvlad@gmail.com> <vlad.dogaru@intel.com>
@@ -6599,7 +6599,7 @@
 To turn off having tracepoints sent to printk,
  echo 0 > /proc/sys/kernel/tracepoint_printk
 Note, echoing 1 into this file without the
-tracepoint_printk kernel cmdline option has no effect.
+tp_printk kernel cmdline option has no effect.
 
 The tp_printk_stop_on_boot (see below) can also be used
 to stop the printing of events to console at
@@ -155,7 +155,7 @@ Setting this parameter to 100 will disable the hysteresis.
 
 Some users cannot tolerate the swapping that comes with zswap store failures
 and zswap writebacks. Swapping can be disabled entirely (without disabling
-zswap itself) on a cgroup-basis as follows:
+zswap itself) on a cgroup-basis as follows::
 
 	echo 0 > /sys/fs/cgroup/<cgroup-name>/memory.zswap.writeback
 
@@ -166,7 +166,7 @@ writeback (because the same pages might be rejected again and again).
 When there is a sizable amount of cold memory residing in the zswap pool, it
 can be advantageous to proactively write these cold pages to swap and reclaim
 the memory for other use cases. By default, the zswap shrinker is disabled.
-User can enable it as follows:
+User can enable it as follows::
 
 	echo Y > /sys/module/zswap/parameters/shrinker_enabled
 
@@ -104,6 +104,8 @@ Some of these tools are listed below:
   KASAN and can be used in production. See Documentation/dev-tools/kfence.rst
 * lockdep is a locking correctness validator. See
   Documentation/locking/lockdep-design.rst
+* Runtime Verification (RV) supports checking specific behaviours for a given
+  subsystem. See Documentation/trace/rv/runtime-verification.rst
 * There are several other pieces of debug instrumentation in the kernel, many
   of which can be found in lib/Kconfig.debug
@ -1,5 +1,3 @@
|
||||
Status: Unstable - ABI compatibility may be broken in the future
|
||||
|
||||
Binding for Keystone gate control driver which uses PSC controller IP.
|
||||
|
||||
This binding uses the common clock binding[1].
|
||||
|
@ -1,5 +1,3 @@
|
||||
Status: Unstable - ABI compatibility may be broken in the future
|
||||
|
||||
Binding for keystone PLLs. The main PLL IP typically has a multiplier,
|
||||
a divider and a post divider. The additional PLL IPs like ARMPLL, DDRPLL
|
||||
and PAPLL are controlled by the memory mapped register where as the Main
|
||||
|
@ -1,7 +1,5 @@
|
||||
Binding for Texas Instruments ADPLL clock.
|
||||
|
||||
Binding status: Unstable - ABI compatibility may be broken in the future
|
||||
|
||||
This binding uses the common clock binding[1]. It assumes a
|
||||
register-mapped ADPLL with two to three selectable input clocks
|
||||
and three to four children.
|
||||
|
@ -1,7 +1,5 @@
|
||||
Binding for Texas Instruments APLL clock.
|
||||
|
||||
Binding status: Unstable - ABI compatibility may be broken in the future
|
||||
|
||||
This binding uses the common clock binding[1]. It assumes a
|
||||
register-mapped APLL with usually two selectable input clocks
|
||||
(reference clock and bypass clock), with analog phase locked
|
||||
|
@ -1,7 +1,5 @@
|
||||
Binding for Texas Instruments autoidle clock.
|
||||
|
||||
Binding status: Unstable - ABI compatibility may be broken in the future
|
||||
|
||||
This binding uses the common clock binding[1]. It assumes a register mapped
|
||||
clock which can be put to idle automatically by hardware based on the usage
|
||||
and a configuration bit setting. Autoidle clock is never an individual
|
||||
|
@ -1,7 +1,5 @@
|
||||
Binding for Texas Instruments clockdomain.
|
||||
|
||||
Binding status: Unstable - ABI compatibility may be broken in the future
|
||||
|
||||
This binding uses the common clock binding[1] in consumer role.
|
||||
Every clock on TI SoC belongs to one clockdomain, but software
|
||||
only needs this information for specific clocks which require
|
||||
|
@ -1,7 +1,5 @@
|
||||
Binding for TI composite clock.
|
||||
|
||||
Binding status: Unstable - ABI compatibility may be broken in the future
|
||||
|
||||
This binding uses the common clock binding[1]. It assumes a
|
||||
register-mapped composite clock with multiple different sub-types;
|
||||
|
||||
|
@ -1,7 +1,5 @@
|
||||
Binding for TI divider clock
|
||||
|
||||
Binding status: Unstable - ABI compatibility may be broken in the future
|
||||
|
||||
This binding uses the common clock binding[1]. It assumes a
|
||||
register-mapped adjustable clock rate divider that does not gate and has
|
||||
only one input clock or parent. By default the value programmed into
|
||||
|
@ -1,7 +1,5 @@
|
||||
Binding for Texas Instruments DPLL clock.
|
||||
|
||||
Binding status: Unstable - ABI compatibility may be broken in the future
|
||||
|
||||
This binding uses the common clock binding[1]. It assumes a
|
||||
register-mapped DPLL with usually two selectable input clocks
|
||||
(reference clock and bypass clock), with digital phase locked
|
||||
|
@ -1,7 +1,5 @@
|
||||
Binding for Texas Instruments FAPLL clock.
|
||||
|
||||
Binding status: Unstable - ABI compatibility may be broken in the future
|
||||
|
||||
This binding uses the common clock binding[1]. It assumes a
|
||||
register-mapped FAPLL with usually two selectable input clocks
|
||||
(reference clock and bypass clock), and one or more child
|
||||
|
@ -1,7 +1,5 @@
|
||||
Binding for TI fixed factor rate clock sources.
|
||||
|
||||
Binding status: Unstable - ABI compatibility may be broken in the future
|
||||
|
||||
This binding uses the common clock binding[1], and also uses the autoidle
|
||||
support from TI autoidle clock [2].
|
||||
|
||||
|
@ -1,7 +1,5 @@
|
||||
Binding for Texas Instruments gate clock.
|
||||
|
||||
Binding status: Unstable - ABI compatibility may be broken in the future
|
||||
|
||||
This binding uses the common clock binding[1]. This clock is
|
||||
quite much similar to the basic gate-clock [2], however,
|
||||
it supports a number of additional features. If no register
|
||||
|
@ -1,7 +1,5 @@
|
||||
Binding for Texas Instruments interface clock.
|
||||
|
||||
Binding status: Unstable - ABI compatibility may be broken in the future
|
||||
|
||||
This binding uses the common clock binding[1]. This clock is
|
||||
quite much similar to the basic gate-clock [2], however,
|
||||
it supports a number of additional features, including
|
||||
|
@ -1,7 +1,5 @@
|
||||
Binding for TI mux clock.
|
||||
|
||||
Binding status: Unstable - ABI compatibility may be broken in the future
|
||||
|
||||
This binding uses the common clock binding[1]. It assumes a
|
||||
register-mapped multiplexer with multiple input clock signals or
|
||||
parents, one of which can be selected as output. This clock does not
|
||||
|
@@ -144,6 +144,8 @@ Example::
        #dma-cells = <1>;
        clocks = <&clock_controller 0>, <&clock_controller 1>;
        clock-names = "bus", "host";
        #address-cells = <1>;
        #size-cells = <1>;
        vendor,custom-property = <2>;
        status = "disabled";
@@ -94,6 +94,10 @@ properties:
 
   local-bd-address: true
 
+  qcom,local-bd-address-broken:
+    type: boolean
+    description:
+      boot firmware is incorrectly passing the address in big-endian order
 
 required:
   - compatible
@@ -1,9 +1,6 @@
 TI Davinci DSP devices
 =======================
 
-Binding status: Unstable - Subject to changes for DT representation of clocks
-and resets
-
 The TI Davinci family of SoCs usually contains a TI DSP Core sub-system that
 is used to offload some of the processor-intensive tasks or algorithms, for
 achieving various system level goals.
@@ -51,7 +51,7 @@ properties:
   ranges: true
 
 patternProperties:
-  "^clock-controller@[0-9a-z]+$":
+  "^clock-controller@[0-9a-f]+$":
     $ref: /schemas/clock/fsl,flexspi-clock.yaml#
 
 required:
@@ -41,7 +41,7 @@ properties:
   ranges: true
 
 patternProperties:
-  "^interrupt-controller@[a-z0-9]+$":
+  "^interrupt-controller@[a-f0-9]+$":
     $ref: /schemas/interrupt-controller/fsl,ls-extirq.yaml#
 
 required:
@@ -60,7 +60,7 @@ properties:
       be implemented in an always-on power domain."
 
 patternProperties:
-  '^frame@[0-9a-z]*$':
+  '^frame@[0-9a-f]+$':
     type: object
     additionalProperties: false
     description: A timer node has up to 8 frame sub-nodes, each with the following properties.
@@ -27,10 +27,13 @@ properties:
       - qcom,msm8996-ufshc
       - qcom,msm8998-ufshc
       - qcom,sa8775p-ufshc
+      - qcom,sc7180-ufshc
       - qcom,sc7280-ufshc
+      - qcom,sc8180x-ufshc
       - qcom,sc8280xp-ufshc
       - qcom,sdm845-ufshc
       - qcom,sm6115-ufshc
+      - qcom,sm6125-ufshc
       - qcom,sm6350-ufshc
       - qcom,sm8150-ufshc
       - qcom,sm8250-ufshc
@@ -42,11 +45,11 @@ properties:
       - const: jedec,ufs-2.0
 
   clocks:
-    minItems: 8
+    minItems: 7
     maxItems: 11
 
   clock-names:
-    minItems: 8
+    minItems: 7
     maxItems: 11
 
   dma-coherent: true
@@ -112,6 +115,31 @@ required:
 allOf:
   - $ref: ufs-common.yaml
 
+  - if:
+      properties:
+        compatible:
+          contains:
+            enum:
+              - qcom,sc7180-ufshc
+    then:
+      properties:
+        clocks:
+          minItems: 7
+          maxItems: 7
+        clock-names:
+          items:
+            - const: core_clk
+            - const: bus_aggr_clk
+            - const: iface_clk
+            - const: core_clk_unipro
+            - const: ref_clk
+            - const: tx_lane0_sync_clk
+            - const: rx_lane0_sync_clk
+        reg:
+          maxItems: 1
+        reg-names:
+          maxItems: 1
+
   - if:
       properties:
        compatible:
@@ -120,6 +148,7 @@ allOf:
              - qcom,msm8998-ufshc
              - qcom,sa8775p-ufshc
              - qcom,sc7280-ufshc
+              - qcom,sc8180x-ufshc
              - qcom,sc8280xp-ufshc
              - qcom,sm8250-ufshc
              - qcom,sm8350-ufshc
@@ -215,6 +244,7 @@ allOf:
          contains:
            enum:
              - qcom,sm6115-ufshc
+              - qcom,sm6125-ufshc
    then:
      properties:
        clocks:
@@ -248,7 +278,7 @@ allOf:
        reg:
          maxItems: 1
        clocks:
-          minItems: 8
+          minItems: 7
          maxItems: 8
    else:
      properties:
@@ -256,7 +286,7 @@ allOf:
          minItems: 1
          maxItems: 2
        clocks:
-          minItems: 8
+          minItems: 7
          maxItems: 11
 
 unevaluatedProperties: false
 Documentation/networking/devlink/devlink-eswitch-attr.rst | 76 (new file)
@@ -0,0 +1,76 @@
.. SPDX-License-Identifier: GPL-2.0

==========================
Devlink E-Switch Attribute
==========================

Devlink E-Switch supports two modes of operation: legacy and switchdev.
Legacy mode operates based on traditional MAC/VLAN steering rules. Switching
decisions are made based on MAC addresses, VLANs, etc. There is limited ability
to offload switching rules to hardware.

On the other hand, switchdev mode allows for more advanced offloading
capabilities of the E-Switch to hardware. In switchdev mode, more switching
rules and logic can be offloaded to the hardware switch ASIC. It enables
representor netdevices that represent the slow path of virtual functions (VFs)
or scalable-functions (SFs) of the device. See more information about
:ref:`Documentation/networking/switchdev.rst <switchdev>` and
:ref:`Documentation/networking/representors.rst <representors>`.

In addition, the devlink E-Switch also comes with other attributes listed
in the following section.

Attributes Description
======================

The following is a list of E-Switch attributes.

.. list-table:: E-Switch attributes
   :widths: 8 5 45

   * - Name
     - Type
     - Description
   * - ``mode``
     - enum
     - The mode of the device. The mode can be one of the following:

       * ``legacy`` operates based on traditional MAC/VLAN steering
         rules.
       * ``switchdev`` allows for more advanced offloading capabilities of
         the E-Switch to hardware.
   * - ``inline-mode``
     - enum
     - Some HWs need the VF driver to put part of the packet
       headers on the TX descriptor so the e-switch can do proper
       matching and steering. Support for both switchdev mode and legacy mode.

       * ``none`` none.
       * ``link`` L2 mode.
       * ``network`` L3 mode.
       * ``transport`` L4 mode.
   * - ``encap-mode``
     - enum
     - The encapsulation mode of the device. Support for both switchdev mode
       and legacy mode. The mode can be one of the following:

       * ``none`` Disable encapsulation support.
       * ``basic`` Enable encapsulation support.

Example Usage
=============

.. code:: shell

    # enable switchdev mode
    $ devlink dev eswitch set pci/0000:08:00.0 mode switchdev

    # set inline-mode and encap-mode
    $ devlink dev eswitch set pci/0000:08:00.0 inline-mode none encap-mode basic

    # display devlink device eswitch attributes
    $ devlink dev eswitch show pci/0000:08:00.0
    pci/0000:08:00.0: mode switchdev inline-mode none encap-mode basic

    # enable encap-mode with legacy mode
    $ devlink dev eswitch set pci/0000:08:00.0 mode legacy inline-mode none encap-mode basic
@@ -67,6 +67,7 @@ general.
   devlink-selftests
   devlink-trap
   devlink-linecard
+   devlink-eswitch-attr
 
 Driver-specific documentation
 -----------------------------
@@ -1,4 +1,5 @@
 .. SPDX-License-Identifier: GPL-2.0
+.. _representors:
 
 =============================
 Network Function Representors
@@ -46,21 +46,16 @@ SEV hardware uses ASIDs to associate a memory encryption key with a VM.
 Hence, the ASID for the SEV-enabled guests must be from 1 to a maximum value
 defined in the CPUID 0x8000001f[ecx] field.
 
-SEV Key Management
-==================
+The KVM_MEMORY_ENCRYPT_OP ioctl
+===============================
 
-The SEV guest key management is handled by a separate processor called the AMD
-Secure Processor (AMD-SP). Firmware running inside the AMD-SP provides a secure
-key management interface to perform common hypervisor activities such as
-encrypting bootstrap code, snapshot, migrating and debugging the guest. For more
-information, see the SEV Key Management spec [api-spec]_
-
-The main ioctl to access SEV is KVM_MEMORY_ENCRYPT_OP. If the argument
-to KVM_MEMORY_ENCRYPT_OP is NULL, the ioctl returns 0 if SEV is enabled
-and ``ENOTTY`` if it is disabled (on some older versions of Linux,
-the ioctl runs normally even with a NULL argument, and therefore will
-likely return ``EFAULT``). If non-NULL, the argument to KVM_MEMORY_ENCRYPT_OP
-must be a struct kvm_sev_cmd::
+The main ioctl to access SEV is KVM_MEMORY_ENCRYPT_OP, which operates on
+the VM file descriptor. If the argument to KVM_MEMORY_ENCRYPT_OP is NULL,
+the ioctl returns 0 if SEV is enabled and ``ENOTTY`` if it is disabled
+(on some older versions of Linux, the ioctl tries to run normally even
+with a NULL argument, and therefore will likely return ``EFAULT`` instead
+of zero if SEV is enabled). If non-NULL, the argument to
+KVM_MEMORY_ENCRYPT_OP must be a struct kvm_sev_cmd::
 
 	struct kvm_sev_cmd {
 		__u32 id;
@@ -87,10 +82,6 @@ guests, such as launching, running, snapshotting, migrating and decommissioning.
 The KVM_SEV_INIT command is used by the hypervisor to initialize the SEV platform
 context. In a typical workflow, this command should be the first command issued.
 
-The firmware can be initialized either by using its own non-volatile storage or
-the OS can manage the NV storage for the firmware using the module parameter
-``init_ex_path``. If the file specified by ``init_ex_path`` does not exist or
-is invalid, the OS will create or override the file with output from PSP.
-
 Returns: 0 on success, -negative on error
 
@@ -434,6 +425,21 @@ issued by the hypervisor to make the guest ready for execution.
 
 Returns: 0 on success, -negative on error
 
+Firmware Management
+===================
+
+The SEV guest key management is handled by a separate processor called the AMD
+Secure Processor (AMD-SP). Firmware running inside the AMD-SP provides a secure
+key management interface to perform common hypervisor activities such as
+encrypting bootstrap code, snapshot, migrating and debugging the guest. For more
+information, see the SEV Key Management spec [api-spec]_
+
+The AMD-SP firmware can be initialized either by using its own non-volatile
+storage or the OS can manage the NV storage for the firmware using
+parameter ``init_ex_path`` of the ``ccp`` module. If the file specified
+by ``init_ex_path`` does not exist or is invalid, the OS will create or
+override the file with PSP non-volatile storage.
+
 References
 ==========
 
@@ -193,8 +193,8 @@ data:
	Asynchronous page fault (APF) control MSR.
 
	Bits 63-6 hold 64-byte aligned physical address of a 64 byte memory area
-	which must be in guest RAM and must be zeroed. This memory is expected
-	to hold a copy of the following structure::
+	which must be in guest RAM. This memory is expected to hold the
+	following structure::
 
		struct kvm_vcpu_pv_apf_data {
			/* Used for 'page not present' events delivered via #PF */
@@ -204,7 +204,6 @@ data:
			__u32 token;
 
			__u8 pad[56];
-			__u32 enabled;
		};
 
	Bits 5-4 of the MSR are reserved and should be zero. Bit 0 is set to 1
@@ -232,14 +231,14 @@ data:
	as regular page fault, guest must reset 'flags' to '0' before it does
	something that can generate normal page fault.
 
-	Bytes 5-7 of 64 byte memory location ('token') will be written to by the
+	Bytes 4-7 of 64 byte memory location ('token') will be written to by the
	hypervisor at the time of APF 'page ready' event injection. The content
-	of these bytes is a token which was previously delivered as 'page not
-	present' event. The event indicates the page in now available. Guest is
-	supposed to write '0' to 'token' when it is done handling 'page ready'
-	event and to write 1' to MSR_KVM_ASYNC_PF_ACK after clearing the location;
-	writing to the MSR forces KVM to re-scan its queue and deliver the next
-	pending notification.
+	of these bytes is a token which was previously delivered in CR2 as
+	'page not present' event. The event indicates the page is now available.
+	Guest is supposed to write '0' to 'token' when it is done handling
+	'page ready' event and to write '1' to MSR_KVM_ASYNC_PF_ACK after
+	clearing the location; writing to the MSR forces KVM to re-scan its
+	queue and deliver the next pending notification.
 
	Note, MSR_KVM_ASYNC_PF_INT MSR specifying the interrupt vector for 'page
	ready' APF delivery needs to be written to before enabling APF mechanism
 MAINTAINERS | 49
@@ -14019,6 +14019,7 @@ F:	drivers/net/ethernet/mellanox/mlx4/en_*
 
 MELLANOX ETHERNET DRIVER (mlx5e)
 M:	Saeed Mahameed <saeedm@nvidia.com>
+M:	Tariq Toukan <tariqt@nvidia.com>
 L:	netdev@vger.kernel.org
 S:	Supported
 W:	http://www.mellanox.com
@@ -14086,6 +14087,7 @@ F:	include/uapi/rdma/mlx4-abi.h
 MELLANOX MLX5 core VPI driver
 M:	Saeed Mahameed <saeedm@nvidia.com>
 M:	Leon Romanovsky <leonro@nvidia.com>
+M:	Tariq Toukan <tariqt@nvidia.com>
 L:	netdev@vger.kernel.org
 L:	linux-rdma@vger.kernel.org
 S:	Supported
@@ -16731,9 +16733,9 @@ F:	include/uapi/linux/ppdev.h
 
 PARAVIRT_OPS INTERFACE
 M:	Juergen Gross <jgross@suse.com>
-R:	Ajay Kaher <akaher@vmware.com>
-R:	Alexey Makhalov <amakhalov@vmware.com>
-R:	VMware PV-Drivers Reviewers <pv-drivers@vmware.com>
+R:	Ajay Kaher <ajay.kaher@broadcom.com>
+R:	Alexey Makhalov <alexey.amakhalov@broadcom.com>
+R:	Broadcom internal kernel review list <bcm-kernel-feedback-list@broadcom.com>
 L:	virtualization@lists.linux.dev
 L:	x86@kernel.org
 S:	Supported
@@ -22428,6 +22430,7 @@ S:	Maintained
 W:	https://kernsec.org/wiki/index.php/Linux_Kernel_Integrity
 Q:	https://patchwork.kernel.org/project/linux-integrity/list/
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jarkko/linux-tpmdd.git
+F:	Documentation/devicetree/bindings/tpm/
 F:	drivers/char/tpm/
 
 TPS546D24 DRIVER
@@ -23652,9 +23655,9 @@ S:	Supported
 F:	drivers/misc/vmw_balloon.c
 
 VMWARE HYPERVISOR INTERFACE
-M:	Ajay Kaher <akaher@vmware.com>
-M:	Alexey Makhalov <amakhalov@vmware.com>
-R:	VMware PV-Drivers Reviewers <pv-drivers@vmware.com>
+M:	Ajay Kaher <ajay.kaher@broadcom.com>
+M:	Alexey Makhalov <alexey.amakhalov@broadcom.com>
+R:	Broadcom internal kernel review list <bcm-kernel-feedback-list@broadcom.com>
 L:	virtualization@lists.linux.dev
 L:	x86@kernel.org
 S:	Supported
@@ -23663,34 +23666,34 @@ F:	arch/x86/include/asm/vmware.h
 F:	arch/x86/kernel/cpu/vmware.c
 
 VMWARE PVRDMA DRIVER
-M:	Bryan Tan <bryantan@vmware.com>
-M:	Vishnu Dasa <vdasa@vmware.com>
-R:	VMware PV-Drivers Reviewers <pv-drivers@vmware.com>
+M:	Bryan Tan <bryan-bt.tan@broadcom.com>
+M:	Vishnu Dasa <vishnu.dasa@broadcom.com>
+R:	Broadcom internal kernel review list <bcm-kernel-feedback-list@broadcom.com>
 L:	linux-rdma@vger.kernel.org
 S:	Supported
 F:	drivers/infiniband/hw/vmw_pvrdma/
 
 VMWARE PVSCSI DRIVER
-M:	Vishal Bhakta <vbhakta@vmware.com>
-R:	VMware PV-Drivers Reviewers <pv-drivers@vmware.com>
+M:	Vishal Bhakta <vishal.bhakta@broadcom.com>
+R:	Broadcom internal kernel review list <bcm-kernel-feedback-list@broadcom.com>
 L:	linux-scsi@vger.kernel.org
 S:	Supported
 F:	drivers/scsi/vmw_pvscsi.c
 F:	drivers/scsi/vmw_pvscsi.h
 
 VMWARE VIRTUAL PTP CLOCK DRIVER
-M:	Jeff Sipek <jsipek@vmware.com>
-R:	Ajay Kaher <akaher@vmware.com>
-R:	Alexey Makhalov <amakhalov@vmware.com>
-R:	VMware PV-Drivers Reviewers <pv-drivers@vmware.com>
+M:	Nick Shi <nick.shi@broadcom.com>
+R:	Ajay Kaher <ajay.kaher@broadcom.com>
+R:	Alexey Makhalov <alexey.amakhalov@broadcom.com>
+R:	Broadcom internal kernel review list <bcm-kernel-feedback-list@broadcom.com>
 L:	netdev@vger.kernel.org
 S:	Supported
 F:	drivers/ptp/ptp_vmw.c
 
 VMWARE VMCI DRIVER
-M:	Bryan Tan <bryantan@vmware.com>
-M:	Vishnu Dasa <vdasa@vmware.com>
-R:	VMware PV-Drivers Reviewers <pv-drivers@vmware.com>
+M:	Bryan Tan <bryan-bt.tan@broadcom.com>
+M:	Vishnu Dasa <vishnu.dasa@broadcom.com>
+R:	Broadcom internal kernel review list <bcm-kernel-feedback-list@broadcom.com>
 L:	linux-kernel@vger.kernel.org
 S:	Supported
 F:	drivers/misc/vmw_vmci/
@@ -23705,16 +23708,16 @@ F:	drivers/input/mouse/vmmouse.c
 F:	drivers/input/mouse/vmmouse.h
 
 VMWARE VMXNET3 ETHERNET DRIVER
-M:	Ronak Doshi <doshir@vmware.com>
-R:	VMware PV-Drivers Reviewers <pv-drivers@vmware.com>
+M:	Ronak Doshi <ronak.doshi@broadcom.com>
+R:	Broadcom internal kernel review list <bcm-kernel-feedback-list@broadcom.com>
 L:	netdev@vger.kernel.org
 S:	Supported
 F:	drivers/net/vmxnet3/
 
 VMWARE VSOCK VMCI TRANSPORT DRIVER
-M:	Bryan Tan <bryantan@vmware.com>
-M:	Vishnu Dasa <vdasa@vmware.com>
-R:	VMware PV-Drivers Reviewers <pv-drivers@vmware.com>
+M:	Bryan Tan <bryan-bt.tan@broadcom.com>
+M:	Vishnu Dasa <vishnu.dasa@broadcom.com>
+R:	Broadcom internal kernel review list <bcm-kernel-feedback-list@broadcom.com>
 L:	linux-kernel@vger.kernel.org
 S:	Supported
 F:	net/vmw_vsock/vmci_transport*
@@ -944,6 +944,8 @@ ap_spi_fp: &spi10 {
		vddrf-supply = <&pp1300_l2c>;
		vddch0-supply = <&pp3300_l10c>;
		max-speed = <3200000>;
+
+		qcom,local-bd-address-broken;
	};
 };
 
@@ -291,6 +291,21 @@ SYM_INNER_LABEL(init_el2, SYM_L_LOCAL)
	blr	x2
 0:
	mov_q	x0, HCR_HOST_NVHE_FLAGS
+
+	/*
+	 * Compliant CPUs advertise their VHE-onlyness with
+	 * ID_AA64MMFR4_EL1.E2H0 < 0. HCR_EL2.E2H can be
+	 * RES1 in that case. Publish the E2H bit early so that
+	 * it can be picked up by the init_el2_state macro.
+	 *
+	 * Fruity CPUs seem to have HCR_EL2.E2H set to RAO/WI, but
+	 * don't advertise it (they predate this relaxation).
+	 */
+	mrs_s	x1, SYS_ID_AA64MMFR4_EL1
+	tbz	x1, #(ID_AA64MMFR4_EL1_E2H0_SHIFT + ID_AA64MMFR4_EL1_E2H0_WIDTH - 1), 1f
+
+	orr	x0, x0, #HCR_E2H
+1:
	msr	hcr_el2, x0
	isb
 
@@ -303,22 +318,10 @@ SYM_INNER_LABEL(init_el2, SYM_L_LOCAL)
 
	mov_q	x1, INIT_SCTLR_EL1_MMU_OFF
 
-	/*
-	 * Compliant CPUs advertise their VHE-onlyness with
-	 * ID_AA64MMFR4_EL1.E2H0 < 0. HCR_EL2.E2H can be
-	 * RES1 in that case.
-	 *
-	 * Fruity CPUs seem to have HCR_EL2.E2H set to RES1, but
-	 * don't advertise it (they predate this relaxation).
-	 */
-	mrs_s	x0, SYS_ID_AA64MMFR4_EL1
-	ubfx	x0, x0, #ID_AA64MMFR4_EL1_E2H0_SHIFT, #ID_AA64MMFR4_EL1_E2H0_WIDTH
-	tbnz	x0, #(ID_AA64MMFR4_EL1_E2H0_SHIFT + ID_AA64MMFR4_EL1_E2H0_WIDTH - 1), 1f
-
	mrs	x0, hcr_el2
	and	x0, x0, #HCR_E2H
	cbz	x0, 2f
 1:
 
	/* Set a sane SCTLR_EL1, the VHE way */
	pre_disable_mmu_workaround
	msr_s	SYS_SCTLR_EL12, x1
@@ -761,7 +761,6 @@ static void sve_init_header_from_task(struct user_sve_header *header,
 {
	unsigned int vq;
	bool active;
-	bool fpsimd_only;
	enum vec_type task_type;
 
	memset(header, 0, sizeof(*header));
@@ -777,12 +776,10 @@ static void sve_init_header_from_task(struct user_sve_header *header,
	case ARM64_VEC_SVE:
		if (test_tsk_thread_flag(target, TIF_SVE_VL_INHERIT))
			header->flags |= SVE_PT_VL_INHERIT;
-		fpsimd_only = !test_tsk_thread_flag(target, TIF_SVE);
		break;
	case ARM64_VEC_SME:
		if (test_tsk_thread_flag(target, TIF_SME_VL_INHERIT))
			header->flags |= SVE_PT_VL_INHERIT;
-		fpsimd_only = false;
		break;
	default:
		WARN_ON_ONCE(1);
@@ -790,7 +787,7 @@ static void sve_init_header_from_task(struct user_sve_header *header,
	}
 
	if (active) {
-		if (fpsimd_only) {
+		if (target->thread.fp_type == FP_STATE_FPSIMD) {
			header->flags |= SVE_PT_REGS_FPSIMD;
		} else {
			header->flags |= SVE_PT_REGS_SVE;
@@ -2597,14 +2597,11 @@ static __init int kvm_arm_init(void)
	if (err)
		goto out_hyp;
 
-	if (is_protected_kvm_enabled()) {
-		kvm_info("Protected nVHE mode initialized successfully\n");
-	} else if (in_hyp_mode) {
-		kvm_info("VHE mode initialized successfully\n");
-	} else {
-		char mode = cpus_have_final_cap(ARM64_KVM_HVHE) ? 'h' : 'n';
-		kvm_info("Hyp mode (%cVHE) initialized successfully\n", mode);
-	}
+	kvm_info("%s%sVHE mode initialized successfully\n",
+		 in_hyp_mode ? "" : (is_protected_kvm_enabled() ?
+				     "Protected " : "Hyp "),
+		 in_hyp_mode ? "" : (cpus_have_final_cap(ARM64_KVM_HVHE) ?
+				     "h" : "n"));
 
	/*
	 * FIXME: Do something reasonable if kvm_init() fails after pKVM
@@ -154,7 +154,8 @@ void __kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
	/* Switch to requested VMID */
	__tlb_switch_to_guest(mmu, &cxt, false);
 
-	__flush_s2_tlb_range_op(ipas2e1is, start, pages, stride, 0);
+	__flush_s2_tlb_range_op(ipas2e1is, start, pages, stride,
+				TLBI_TTL_UNKNOWN);
 
	dsb(ish);
	__tlbi(vmalle1is);
@@ -528,7 +528,7 @@ static int hyp_unmap_walker(const struct kvm_pgtable_visit_ctx *ctx,
 
		kvm_clear_pte(ctx->ptep);
		dsb(ishst);
-		__tlbi_level(vae2is, __TLBI_VADDR(ctx->addr, 0), ctx->level);
+		__tlbi_level(vae2is, __TLBI_VADDR(ctx->addr, 0), TLBI_TTL_UNKNOWN);
	} else {
		if (ctx->end - ctx->addr < granule)
			return -EINVAL;
@@ -843,12 +843,15 @@ static bool stage2_try_break_pte(const struct kvm_pgtable_visit_ctx *ctx,
	 * Perform the appropriate TLB invalidation based on the
	 * evicted pte value (if any).
	 */
-	if (kvm_pte_table(ctx->old, ctx->level))
-		kvm_tlb_flush_vmid_range(mmu, ctx->addr,
-					kvm_granule_size(ctx->level));
-	else if (kvm_pte_valid(ctx->old))
+	if (kvm_pte_table(ctx->old, ctx->level)) {
+		u64 size = kvm_granule_size(ctx->level);
+		u64 addr = ALIGN_DOWN(ctx->addr, size);
+
+		kvm_tlb_flush_vmid_range(mmu, addr, size);
+	} else if (kvm_pte_valid(ctx->old)) {
		kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu,
			     ctx->addr, ctx->level);
+	}
	}
 
	if (stage2_pte_is_counted(ctx->old))
@@ -896,9 +899,13 @@ static void stage2_unmap_put_pte(const struct kvm_pgtable_visit_ctx *ctx,
	if (kvm_pte_valid(ctx->old)) {
		kvm_clear_pte(ctx->ptep);
 
-		if (!stage2_unmap_defer_tlb_flush(pgt))
-			kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu,
-					ctx->addr, ctx->level);
+		if (kvm_pte_table(ctx->old, ctx->level)) {
+			kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, ctx->addr,
+				     TLBI_TTL_UNKNOWN);
+		} else if (!stage2_unmap_defer_tlb_flush(pgt)) {
+			kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, ctx->addr,
+				     ctx->level);
+		}
	}
 
	mm_ops->put_page(ctx->ptep);
@@ -171,7 +171,8 @@ void __kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
	/* Switch to requested VMID */
	__tlb_switch_to_guest(mmu, &cxt);
 
-	__flush_s2_tlb_range_op(ipas2e1is, start, pages, stride, 0);
+	__flush_s2_tlb_range_op(ipas2e1is, start, pages, stride,
+				TLBI_TTL_UNKNOWN);
 
	dsb(ish);
	__tlbi(vmalle1is);
@@ -1637,7 +1637,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
	fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
	is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
 
-	if (esr_fsc_is_permission_fault(esr)) {
+	if (esr_fsc_is_translation_fault(esr)) {
		/* Beyond sanitised PARange (which is the IPA limit) */
		if (fault_ipa >= BIT_ULL(get_kvm_ipa_limit())) {
			kvm_inject_size_fault(vcpu);
@@ -21,7 +21,8 @@
 
 void __init early_init_devtree(void *params)
 {
-	__be32 *dtb = (u32 *)__dtb_start;
+	__be32 __maybe_unused *dtb = (u32 *)__dtb_start;
 
 #if defined(CONFIG_NIOS2_DTB_AT_PHYS_ADDR)
	if (be32_to_cpup((__be32 *)CONFIG_NIOS2_DTB_PHYS_ADDR) ==
			OF_DT_HEADER) {
@@ -30,8 +31,11 @@ void __init early_init_devtree(void *params)
		return;
	}
 #endif
 
+#ifdef CONFIG_NIOS2_DTB_SOURCE_BOOL
	if (be32_to_cpu((__be32) *dtb) == OF_DT_HEADER)
		params = (void *)__dtb_start;
+#endif
 
	early_init_dt_scan(params);
 }
@@ -151,7 +151,7 @@ endif
 endif
 
 vdso-install-y += arch/riscv/kernel/vdso/vdso.so.dbg
-vdso-install-$(CONFIG_COMPAT) += arch/riscv/kernel/compat_vdso/compat_vdso.so.dbg:../compat_vdso/compat_vdso.so
+vdso-install-$(CONFIG_COMPAT) += arch/riscv/kernel/compat_vdso/compat_vdso.so.dbg
 
 ifneq ($(CONFIG_XIP_KERNEL),y)
 ifeq ($(CONFIG_RISCV_M_MODE)$(CONFIG_ARCH_CANAAN),yy)
@@ -593,6 +593,12 @@ static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
	return ptep_test_and_clear_young(vma, address, ptep);
 }
 
+#define pgprot_nx pgprot_nx
+static inline pgprot_t pgprot_nx(pgprot_t _prot)
+{
+	return __pgprot(pgprot_val(_prot) & ~_PAGE_EXEC);
+}
+
 #define pgprot_noncached pgprot_noncached
 static inline pgprot_t pgprot_noncached(pgprot_t _prot)
 {
@@ -36,7 +36,8 @@ asmlinkage long __riscv_sys_ni_syscall(const struct pt_regs *);
		     ulong) \
		__attribute__((alias(__stringify(___se_##prefix##name)))); \
	__diag_pop(); \
-	static long noinline ___se_##prefix##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \
+	static long noinline ___se_##prefix##name(__MAP(x,__SC_LONG,__VA_ARGS__)) \
+		__used; \
	static long ___se_##prefix##name(__MAP(x,__SC_LONG,__VA_ARGS__))
 
 #define SC_RISCV_REGS_TO_ARGS(x, ...) \
@@ -319,7 +319,7 @@ unsigned long __must_check clear_user(void __user *to, unsigned long n)
 
 #define __get_kernel_nofault(dst, src, type, err_label) \
 do { \
-	long __kr_err; \
+	long __kr_err = 0; \
	\
	__get_user_nocheck(*((type *)(dst)), (type *)(src), __kr_err); \
	if (unlikely(__kr_err)) \
@@ -328,7 +328,7 @@ do { \
 
 #define __put_kernel_nofault(dst, src, type, err_label) \
 do { \
-	long __kr_err; \
+	long __kr_err = 0; \
	\
	__put_user_nocheck(*((type *)(src)), (type *)(dst), __kr_err); \
	if (unlikely(__kr_err)) \
@@ -34,7 +34,7 @@
 #define AT_L3_CACHEGEOMETRY	47
 
 /* entries in ARCH_DLINFO */
-#define AT_VECTOR_SIZE_ARCH	9
+#define AT_VECTOR_SIZE_ARCH	10
 #define AT_MINSIGSTKSZ		51
 
 #endif /* _UAPI_ASM_RISCV_AUXVEC_H */
@@ -74,5 +74,5 @@ quiet_cmd_compat_vdsold = VDSOLD $@
                   rm $@.tmp
 
 # actual build commands
-quiet_cmd_compat_vdsoas = VDSOAS $@
+quiet_cmd_compat_vdsoas = VDSOAS  $@
      cmd_compat_vdsoas = $(COMPAT_CC) $(a_flags) $(COMPAT_CC_FLAGS) -c -o $@ $<
@@ -80,6 +80,8 @@ static int __patch_insn_set(void *addr, u8 c, size_t len)
	 */
	lockdep_assert_held(&text_mutex);
 
+	preempt_disable();
+
	if (across_pages)
		patch_map(addr + PAGE_SIZE, FIX_TEXT_POKE1);
 
@@ -92,6 +94,8 @@ static int __patch_insn_set(void *addr, u8 c, size_t len)
	if (across_pages)
		patch_unmap(FIX_TEXT_POKE1);
 
+	preempt_enable();
+
	return 0;
 }
 NOKPROBE_SYMBOL(__patch_insn_set);
@@ -122,6 +126,8 @@ static int __patch_insn_write(void *addr, const void *insn, size_t len)
	if (!riscv_patch_in_stop_machine)
		lockdep_assert_held(&text_mutex);
 
+	preempt_disable();
+
	if (across_pages)
		patch_map(addr + PAGE_SIZE, FIX_TEXT_POKE1);
 
@@ -134,6 +140,8 @@ static int __patch_insn_write(void *addr, const void *insn, size_t len)
	if (across_pages)
		patch_unmap(FIX_TEXT_POKE1);
 
+	preempt_enable();
+
	return ret;
 }
 NOKPROBE_SYMBOL(__patch_insn_write);
@@ -27,8 +27,6 @@
 #include <asm/vector.h>
 #include <asm/cpufeature.h>
 
-register unsigned long gp_in_global __asm__("gp");
-
 #if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
 #include <linux/stackprotector.h>
 unsigned long __stack_chk_guard __read_mostly;
@@ -37,7 +35,7 @@ EXPORT_SYMBOL(__stack_chk_guard);
 
 extern asmlinkage void ret_from_fork(void);
 
-void arch_cpu_idle(void)
+void noinstr arch_cpu_idle(void)
 {
	cpu_do_idle();
 }
@@ -207,7 +205,6 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
	if (unlikely(args->fn)) {
		/* Kernel thread */
		memset(childregs, 0, sizeof(struct pt_regs));
-		childregs->gp = gp_in_global;
		/* Supervisor/Machine, irqs on: */
		childregs->status = SR_PP | SR_PIE;
 
@@ -119,6 +119,13 @@ static long __restore_v_state(struct pt_regs *regs, void __user *sc_vec)
	struct __sc_riscv_v_state __user *state = sc_vec;
	void __user *datap;
 
+	/*
+	 * Mark the vstate as clean prior performing the actual copy,
+	 * to avoid getting the vstate incorrectly clobbered by the
+	 * discarded vector state.
+	 */
+	riscv_v_vstate_set_restore(current, regs);
+
	/* Copy everything of __sc_riscv_v_state except datap. */
	err = __copy_from_user(&current->thread.vstate, &state->v_state,
			       offsetof(struct __riscv_v_ext_state, datap));
@@ -133,13 +140,7 @@ static long __restore_v_state(struct pt_regs *regs, void __user *sc_vec)
	 * Copy the whole vector content from user space datap. Use
	 * copy_from_user to prevent information leak.
	 */
-	err = copy_from_user(current->thread.vstate.datap, datap, riscv_v_vsize);
-	if (unlikely(err))
-		return err;
-
-	riscv_v_vstate_set_restore(current, regs);
-
-	return err;
+	return copy_from_user(current->thread.vstate.datap, datap, riscv_v_vsize);
 }
 #else
 #define save_v_state(task, regs) (0)
@@ -122,7 +122,7 @@ void do_trap(struct pt_regs *regs, int signo, int code, unsigned long addr)
		print_vma_addr(KERN_CONT " in ", instruction_pointer(regs));
		pr_cont("\n");
		__show_regs(regs);
-		dump_instr(KERN_EMERG, regs);
+		dump_instr(KERN_INFO, regs);
	}
 
	force_sig_fault(signo, code, (void __user *)addr);
@@ -37,6 +37,7 @@ endif
 
 # Disable -pg to prevent insert call site
 CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE) $(CC_FLAGS_SCS)
+CFLAGS_REMOVE_hwprobe.o = $(CC_FLAGS_FTRACE) $(CC_FLAGS_SCS)
 
 # Disable profiling and instrumentation for VDSO code
 GCOV_PROFILE := n
@@ -137,11 +137,21 @@ static void aplic_write_pending(struct aplic *aplic, u32 irq, bool pending)
	raw_spin_lock_irqsave(&irqd->lock, flags);
 
	sm = irqd->sourcecfg & APLIC_SOURCECFG_SM_MASK;
-	if (!pending &&
-	    ((sm == APLIC_SOURCECFG_SM_LEVEL_HIGH) ||
-	     (sm == APLIC_SOURCECFG_SM_LEVEL_LOW)))
+	if (sm == APLIC_SOURCECFG_SM_INACTIVE)
		goto skip_write_pending;
 
+	if (sm == APLIC_SOURCECFG_SM_LEVEL_HIGH ||
+	    sm == APLIC_SOURCECFG_SM_LEVEL_LOW) {
+		if (!pending)
+			goto skip_write_pending;
+		if ((irqd->state & APLIC_IRQ_STATE_INPUT) &&
+		    sm == APLIC_SOURCECFG_SM_LEVEL_LOW)
+			goto skip_write_pending;
+		if (!(irqd->state & APLIC_IRQ_STATE_INPUT) &&
+		    sm == APLIC_SOURCECFG_SM_LEVEL_HIGH)
+			goto skip_write_pending;
+	}
+
	if (pending)
		irqd->state |= APLIC_IRQ_STATE_PENDING;
	else
@@ -187,16 +197,31 @@ static void aplic_write_enabled(struct aplic *aplic, u32 irq, bool enabled)
 
 static bool aplic_read_input(struct aplic *aplic, u32 irq)
 {
-	bool ret;
-	unsigned long flags;
+	u32 sourcecfg, sm, raw_input, irq_inverted;
	struct aplic_irq *irqd;
+	unsigned long flags;
+	bool ret = false;
 
	if (!irq || aplic->nr_irqs <= irq)
		return false;
	irqd = &aplic->irqs[irq];
 
	raw_spin_lock_irqsave(&irqd->lock, flags);
-	ret = (irqd->state & APLIC_IRQ_STATE_INPUT) ? true : false;
+
+	sourcecfg = irqd->sourcecfg;
+	if (sourcecfg & APLIC_SOURCECFG_D)
+		goto skip;
+
+	sm = sourcecfg & APLIC_SOURCECFG_SM_MASK;
+	if (sm == APLIC_SOURCECFG_SM_INACTIVE)
+		goto skip;
+
+	raw_input = (irqd->state & APLIC_IRQ_STATE_INPUT) ? 1 : 0;
+	irq_inverted = (sm == APLIC_SOURCECFG_SM_LEVEL_LOW ||
+			sm == APLIC_SOURCECFG_SM_EDGE_FALL) ? 1 : 0;
+	ret = !!(raw_input ^ irq_inverted);
+
+skip:
	raw_spin_unlock_irqrestore(&irqd->lock, flags);
 
	return ret;
@@ -986,7 +986,7 @@ static int copy_isa_ext_reg_indices(const struct kvm_vcpu *vcpu,
 
 static inline unsigned long num_isa_ext_regs(const struct kvm_vcpu *vcpu)
 {
-	return copy_isa_ext_reg_indices(vcpu, NULL);;
+	return copy_isa_ext_reg_indices(vcpu, NULL);
 }
 
 static int copy_sbi_ext_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
@@ -99,7 +99,7 @@ static void __ipi_flush_tlb_range_asid(void *info)
	local_flush_tlb_range_asid(d->start, d->size, d->stride, d->asid);
 }
 
-static void __flush_tlb_range(struct cpumask *cmask, unsigned long asid,
+static void __flush_tlb_range(const struct cpumask *cmask, unsigned long asid,
			      unsigned long start, unsigned long size,
			      unsigned long stride)
 {
@@ -200,7 +200,7 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 
 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 {
-	__flush_tlb_range((struct cpumask *)cpu_online_mask, FLUSH_TLB_NO_ASID,
+	__flush_tlb_range(cpu_online_mask, FLUSH_TLB_NO_ASID,
			  start, end - start, PAGE_SIZE);
 }
 
@@ -15,31 +15,31 @@
 #include <asm/barrier.h>
 #include <asm/cmpxchg.h>
 
-static inline int arch_atomic_read(const atomic_t *v)
+static __always_inline int arch_atomic_read(const atomic_t *v)
 {
	return __atomic_read(v);
 }
 #define arch_atomic_read arch_atomic_read
 
-static inline void arch_atomic_set(atomic_t *v, int i)
+static __always_inline void arch_atomic_set(atomic_t *v, int i)
 {
	__atomic_set(v, i);
 }
 #define arch_atomic_set arch_atomic_set
 
-static inline int arch_atomic_add_return(int i, atomic_t *v)
+static __always_inline int arch_atomic_add_return(int i, atomic_t *v)
 {
	return __atomic_add_barrier(i, &v->counter) + i;
 }
 #define arch_atomic_add_return arch_atomic_add_return
 
-static inline int arch_atomic_fetch_add(int i, atomic_t *v)
+static __always_inline int arch_atomic_fetch_add(int i, atomic_t *v)
 {
	return __atomic_add_barrier(i, &v->counter);
 }
 #define arch_atomic_fetch_add arch_atomic_fetch_add
 
-static inline void arch_atomic_add(int i, atomic_t *v)
+static __always_inline void arch_atomic_add(int i, atomic_t *v)
 {
	__atomic_add(i, &v->counter);
 }
@@ -50,11 +50,11 @@ static inline void arch_atomic_add(int i, atomic_t *v)
 #define arch_atomic_fetch_sub(_i, _v) arch_atomic_fetch_add(-(int)(_i), _v)
 
 #define ATOMIC_OPS(op) \
-static inline void arch_atomic_##op(int i, atomic_t *v) \
+static __always_inline void arch_atomic_##op(int i, atomic_t *v) \
 { \
	__atomic_##op(i, &v->counter); \
 } \
-static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
+static __always_inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
 { \
	return __atomic_##op##_barrier(i, &v->counter); \
 }
@@ -74,7 +74,7 @@ ATOMIC_OPS(xor)
 
 #define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new))
 
-static inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
+static __always_inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
 {
	return __atomic_cmpxchg(&v->counter, old, new);
 }
@@ -82,31 +82,31 @@ static inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
 
 #define ATOMIC64_INIT(i)  { (i) }
 
-static inline s64 arch_atomic64_read(const atomic64_t *v)
+static __always_inline s64 arch_atomic64_read(const atomic64_t *v)
 {
	return __atomic64_read(v);
 }
 #define arch_atomic64_read arch_atomic64_read
 
-static inline void arch_atomic64_set(atomic64_t *v, s64 i)
+static __always_inline void arch_atomic64_set(atomic64_t *v, s64 i)
 {
	__atomic64_set(v, i);
 }
 #define arch_atomic64_set arch_atomic64_set
 
-static inline s64 arch_atomic64_add_return(s64 i, atomic64_t *v)
+static __always_inline s64 arch_atomic64_add_return(s64 i, atomic64_t *v)
 {
	return __atomic64_add_barrier(i, (long *)&v->counter) + i;
 }
 #define arch_atomic64_add_return arch_atomic64_add_return
 
-static inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
+static __always_inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
 {
	return __atomic64_add_barrier(i, (long *)&v->counter);
 }
 #define arch_atomic64_fetch_add arch_atomic64_fetch_add
 
-static inline void arch_atomic64_add(s64 i, atomic64_t *v)
+static __always_inline void arch_atomic64_add(s64 i, atomic64_t *v)
 {
	__atomic64_add(i, (long *)&v->counter);
 }
@@ -114,20 +114,20 @@ static inline void arch_atomic64_add(s64 i, atomic64_t *v)
 
 #define arch_atomic64_xchg(v, new) (arch_xchg(&((v)->counter), new))
 
-static inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
+static __always_inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
 {
	return __atomic64_cmpxchg((long *)&v->counter, old, new);
 }
 #define arch_atomic64_cmpxchg arch_atomic64_cmpxchg
 
-#define ATOMIC64_OPS(op) \
-static inline void arch_atomic64_##op(s64 i, atomic64_t *v) \
-{ \
-	__atomic64_##op(i, (long *)&v->counter); \
-} \
-static inline long arch_atomic64_fetch_##op(s64 i, atomic64_t *v) \
-{ \
-	return __atomic64_##op##_barrier(i, (long *)&v->counter); \
+#define ATOMIC64_OPS(op) \
+static __always_inline void arch_atomic64_##op(s64 i, atomic64_t *v) \
+{ \
+	__atomic64_##op(i, (long *)&v->counter); \
+} \
+static __always_inline long arch_atomic64_fetch_##op(s64 i, atomic64_t *v) \
+{ \
+	return __atomic64_##op##_barrier(i, (long *)&v->counter); \
 }
 
 ATOMIC64_OPS(and)
@@ -8,7 +8,7 @@
 #ifndef __ARCH_S390_ATOMIC_OPS__
 #define __ARCH_S390_ATOMIC_OPS__
 
-static inline int __atomic_read(const atomic_t *v)
+static __always_inline int __atomic_read(const atomic_t *v)
 {
	int c;
 
@@ -18,14 +18,14 @@ static inline int __atomic_read(const atomic_t *v)
	return c;
 }
 
-static inline void __atomic_set(atomic_t *v, int i)
+static __always_inline void __atomic_set(atomic_t *v, int i)
 {
	asm volatile(
		"	st	%1,%0\n"
		: "=R" (v->counter) : "d" (i));
 }
 
-static inline s64 __atomic64_read(const atomic64_t *v)
+static __always_inline s64 __atomic64_read(const atomic64_t *v)
 {
	s64 c;
 
@@ -35,7 +35,7 @@ static inline s64 __atomic64_read(const atomic64_t *v)
	return c;
 }
 
-static inline void __atomic64_set(atomic64_t *v, s64 i)
+static __always_inline void __atomic64_set(atomic64_t *v, s64 i)
 {
	asm volatile(
		"	stg	%1,%0\n"
@@ -45,7 +45,7 @@ static inline void __atomic64_set(atomic64_t *v, s64 i)
 #ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
 
 #define __ATOMIC_OP(op_name, op_type, op_string, op_barrier) \
-static inline op_type op_name(op_type val, op_type *ptr) \
+static __always_inline op_type op_name(op_type val, op_type *ptr) \
 { \
	op_type old; \
	\
@@ -96,7 +96,7 @@ __ATOMIC_CONST_OPS(__atomic64_add_const, long, "agsi")
 #else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
 
 #define __ATOMIC_OP(op_name, op_string) \
-static inline int op_name(int val, int *ptr) \
+static __always_inline int op_name(int val, int *ptr) \
 { \
	int old, new; \
	\
@@ -122,7 +122,7 @@ __ATOMIC_OPS(__atomic_xor, "xr")
 #undef __ATOMIC_OPS
 
 #define __ATOMIC64_OP(op_name, op_string) \
-static inline long op_name(long val, long *ptr) \
+static __always_inline long op_name(long val, long *ptr) \
 { \
	long old, new; \
	\
@@ -154,7 +154,7 @@ __ATOMIC64_OPS(__atomic64_xor, "xgr")
 
 #endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
 
-static inline int __atomic_cmpxchg(int *ptr, int old, int new)
+static __always_inline int __atomic_cmpxchg(int *ptr, int old, int new)
 {
	asm volatile(
		"	cs	%[old],%[new],%[ptr]"
@@ -164,7 +164,7 @@ static inline int __atomic_cmpxchg(int *ptr, int old, int new)
	return old;
 }
 
-static inline bool __atomic_cmpxchg_bool(int *ptr, int old, int new)
+static __always_inline bool __atomic_cmpxchg_bool(int *ptr, int old, int new)
 {
	int old_expected = old;
 
@@ -176,7 +176,7 @@ static inline bool __atomic_cmpxchg_bool(int *ptr, int old, int new)
	return old == old_expected;
 }
 
-static inline long __atomic64_cmpxchg(long *ptr, long old, long new)
+static __always_inline long __atomic64_cmpxchg(long *ptr, long old, long new)
 {
	asm volatile(
		"	csg	%[old],%[new],%[ptr]"
@@ -186,7 +186,7 @@ static inline long __atomic64_cmpxchg(long *ptr, long old, long new)
	return old;
 }
 
-static inline bool __atomic64_cmpxchg_bool(long *ptr, long old, long new)
+static __always_inline bool __atomic64_cmpxchg_bool(long *ptr, long old, long new)
 {
	long old_expected = old;
 
@@ -12,12 +12,12 @@
 #define PREEMPT_NEED_RESCHED	0x80000000
 #define PREEMPT_ENABLED	(0 + PREEMPT_NEED_RESCHED)
 
-static inline int preempt_count(void)
+static __always_inline int preempt_count(void)
 {
	return READ_ONCE(S390_lowcore.preempt_count) & ~PREEMPT_NEED_RESCHED;
 }
 
-static inline void preempt_count_set(int pc)
+static __always_inline void preempt_count_set(int pc)
 {
	int old, new;
 
@@ -29,22 +29,22 @@ static inline void preempt_count_set(int pc)
				  old, new) != old);
 }
 
-static inline void set_preempt_need_resched(void)
+static __always_inline void set_preempt_need_resched(void)
 {
	__atomic_and(~PREEMPT_NEED_RESCHED, &S390_lowcore.preempt_count);
 }
 
-static inline void clear_preempt_need_resched(void)
+static __always_inline void clear_preempt_need_resched(void)
 {
	__atomic_or(PREEMPT_NEED_RESCHED, &S390_lowcore.preempt_count);
 }
 
-static inline bool test_preempt_need_resched(void)
+static __always_inline bool test_preempt_need_resched(void)
 {
	return !(READ_ONCE(S390_lowcore.preempt_count) & PREEMPT_NEED_RESCHED);
 }
 
-static inline void __preempt_count_add(int val)
+static __always_inline void __preempt_count_add(int val)
 {
	/*
	 * With some obscure config options and CONFIG_PROFILE_ALL_BRANCHES
@@ -59,17 +59,17 @@ static inline void __preempt_count_add(int val)
	__atomic_add(val, &S390_lowcore.preempt_count);
 }
 
-static inline void __preempt_count_sub(int val)
+static __always_inline void __preempt_count_sub(int val)
 {
	__preempt_count_add(-val);
 }
 
-static inline bool __preempt_count_dec_and_test(void)
+static __always_inline bool __preempt_count_dec_and_test(void)
 {
	return __atomic_add(-1, &S390_lowcore.preempt_count) == 1;
 }
 
-static inline bool should_resched(int preempt_offset)
+static __always_inline bool should_resched(int preempt_offset)
 {
	return unlikely(READ_ONCE(S390_lowcore.preempt_count) ==
			preempt_offset);
@@ -79,45 +79,45 @@ static inline bool should_resched(int preempt_offset)
 
 #define PREEMPT_ENABLED	(0)
 
-static inline int preempt_count(void)
+static __always_inline int preempt_count(void)
 {
	return READ_ONCE(S390_lowcore.preempt_count);
 }
 
-static inline void preempt_count_set(int pc)
+static __always_inline void preempt_count_set(int pc)
 {
	S390_lowcore.preempt_count = pc;
 }
 
-static inline void set_preempt_need_resched(void)
+static __always_inline void set_preempt_need_resched(void)
 {
 }
 
-static inline void clear_preempt_need_resched(void)
+static __always_inline void clear_preempt_need_resched(void)
 {
 }
 
-static inline bool test_preempt_need_resched(void)
+static __always_inline bool test_preempt_need_resched(void)
 {
	return false;
 }
 
-static inline void __preempt_count_add(int val)
+static __always_inline void __preempt_count_add(int val)
 {
	S390_lowcore.preempt_count += val;
 }
 
-static inline void __preempt_count_sub(int val)
+static __always_inline void __preempt_count_sub(int val)
 {
	S390_lowcore.preempt_count -= val;
 }
 
-static inline bool __preempt_count_dec_and_test(void)
+static __always_inline bool __preempt_count_dec_and_test(void)
 {
	return !--S390_lowcore.preempt_count && tif_need_resched();
 }
 
-static inline bool should_resched(int preempt_offset)
+static __always_inline bool should_resched(int preempt_offset)
 {
	return unlikely(preempt_count() == preempt_offset &&
			tif_need_resched());
@@ -635,6 +635,7 @@ SYM_DATA_START_LOCAL(daton_psw)
SYM_DATA_END(daton_psw)

.section .rodata, "a"
.balign 8
#define SYSCALL(esame,emu) .quad __s390x_ ## esame
SYM_DATA_START(sys_call_table)
#include "asm/syscall_table.h"

@@ -90,7 +90,6 @@ static void paicrypt_event_destroy(struct perf_event *event)
event->cpu);
struct paicrypt_map *cpump = mp->mapptr;

cpump->event = NULL;
static_branch_dec(&pai_key);
mutex_lock(&pai_reserve_mutex);
debug_sprintf_event(cfm_dbg, 5, "%s event %#llx cpu %d users %d"
@@ -356,10 +355,15 @@ static int paicrypt_add(struct perf_event *event, int flags)

static void paicrypt_stop(struct perf_event *event, int flags)
{
if (!event->attr.sample_period) /* Counting */
struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr);
struct paicrypt_map *cpump = mp->mapptr;

if (!event->attr.sample_period) { /* Counting */
paicrypt_read(event);
else /* Sampling */
} else { /* Sampling */
perf_sched_cb_dec(event->pmu);
cpump->event = NULL;
}
event->hw.state = PERF_HES_STOPPED;
}

@@ -122,7 +122,6 @@ static void paiext_event_destroy(struct perf_event *event)

free_page(PAI_SAVE_AREA(event));
mutex_lock(&paiext_reserve_mutex);
cpump->event = NULL;
if (refcount_dec_and_test(&cpump->refcnt)) /* Last reference gone */
paiext_free(mp);
paiext_root_free();
@@ -362,10 +361,15 @@ static int paiext_add(struct perf_event *event, int flags)

static void paiext_stop(struct perf_event *event, int flags)
{
if (!event->attr.sample_period) /* Counting */
struct paiext_mapptr *mp = this_cpu_ptr(paiext_root.mapptr);
struct paiext_map *cpump = mp->mapptr;

if (!event->attr.sample_period) { /* Counting */
paiext_read(event);
else /* Sampling */
} else { /* Sampling */
perf_sched_cb_dec(event->pmu);
cpump->event = NULL;
}
event->hw.state = PERF_HES_STOPPED;
}

@@ -75,7 +75,7 @@ static enum fault_type get_fault_type(struct pt_regs *regs)
if (!IS_ENABLED(CONFIG_PGSTE))
return KERNEL_FAULT;
gmap = (struct gmap *)S390_lowcore.gmap;
if (regs->cr1 == gmap->asce)
if (gmap && gmap->asce == regs->cr1)
return GMAP_FAULT;
return KERNEL_FAULT;
}

@@ -117,7 +117,7 @@ extern void callthunks_patch_builtin_calls(void);
extern void callthunks_patch_module_calls(struct callthunk_sites *sites,
struct module *mod);
extern void *callthunks_translate_call_dest(void *dest);
extern int x86_call_depth_emit_accounting(u8 **pprog, void *func);
extern int x86_call_depth_emit_accounting(u8 **pprog, void *func, void *ip);
#else
static __always_inline void callthunks_patch_builtin_calls(void) {}
static __always_inline void
@@ -128,7 +128,7 @@ static __always_inline void *callthunks_translate_call_dest(void *dest)
return dest;
}
static __always_inline int x86_call_depth_emit_accounting(u8 **pprog,
void *func)
void *func, void *ip)
{
return 0;
}

@@ -33,6 +33,8 @@ enum cpuid_leafs
CPUID_7_EDX,
CPUID_8000_001F_EAX,
CPUID_8000_0021_EAX,
CPUID_LNX_5,
NR_CPUID_WORDS,
};

#define X86_CAP_FMT_NUM "%d:%d"

@@ -694,6 +694,7 @@ enum sev_cmd_id {

struct kvm_sev_cmd {
__u32 id;
__u32 pad0;
__u64 data;
__u32 error;
__u32 sev_fd;
@@ -704,28 +705,35 @@ struct kvm_sev_launch_start {
__u32 policy;
__u64 dh_uaddr;
__u32 dh_len;
__u32 pad0;
__u64 session_uaddr;
__u32 session_len;
__u32 pad1;
};

struct kvm_sev_launch_update_data {
__u64 uaddr;
__u32 len;
__u32 pad0;
};


struct kvm_sev_launch_secret {
__u64 hdr_uaddr;
__u32 hdr_len;
__u32 pad0;
__u64 guest_uaddr;
__u32 guest_len;
__u32 pad1;
__u64 trans_uaddr;
__u32 trans_len;
__u32 pad2;
};

struct kvm_sev_launch_measure {
__u64 uaddr;
__u32 len;
__u32 pad0;
};

struct kvm_sev_guest_status {
@@ -738,33 +746,43 @@ struct kvm_sev_dbg {
__u64 src_uaddr;
__u64 dst_uaddr;
__u32 len;
__u32 pad0;
};

struct kvm_sev_attestation_report {
__u8 mnonce[16];
__u64 uaddr;
__u32 len;
__u32 pad0;
};

struct kvm_sev_send_start {
__u32 policy;
__u32 pad0;
__u64 pdh_cert_uaddr;
__u32 pdh_cert_len;
__u32 pad1;
__u64 plat_certs_uaddr;
__u32 plat_certs_len;
__u32 pad2;
__u64 amd_certs_uaddr;
__u32 amd_certs_len;
__u32 pad3;
__u64 session_uaddr;
__u32 session_len;
__u32 pad4;
};

struct kvm_sev_send_update_data {
__u64 hdr_uaddr;
__u32 hdr_len;
__u32 pad0;
__u64 guest_uaddr;
__u32 guest_len;
__u32 pad1;
__u64 trans_uaddr;
__u32 trans_len;
__u32 pad2;
};

struct kvm_sev_receive_start {
@@ -772,17 +790,22 @@ struct kvm_sev_receive_start {
__u32 policy;
__u64 pdh_uaddr;
__u32 pdh_len;
__u32 pad0;
__u64 session_uaddr;
__u32 session_len;
__u32 pad1;
};

struct kvm_sev_receive_update_data {
__u64 hdr_uaddr;
__u32 hdr_len;
__u32 pad0;
__u64 guest_uaddr;
__u32 guest_len;
__u32 pad1;
__u64 trans_uaddr;
__u32 trans_len;
__u32 pad2;
};

#define KVM_X2APIC_API_USE_32BIT_IDS (1ULL << 0)

@@ -142,7 +142,6 @@ struct kvm_vcpu_pv_apf_data {
__u32 token;

__u8 pad[56];
__u32 enabled;
};

#define KVM_PV_EOI_BIT 0

@@ -314,7 +314,7 @@ static bool is_callthunk(void *addr)
return !bcmp(pad, insn_buff, tmpl_size);
}

int x86_call_depth_emit_accounting(u8 **pprog, void *func)
int x86_call_depth_emit_accounting(u8 **pprog, void *func, void *ip)
{
unsigned int tmpl_size = SKL_TMPL_SIZE;
u8 insn_buff[MAX_PATCH_LEN];
@@ -327,7 +327,7 @@ int x86_call_depth_emit_accounting(u8 **pprog, void *func)
return 0;

memcpy(insn_buff, skl_call_thunk_template, tmpl_size);
apply_relocation(insn_buff, tmpl_size, *pprog,
apply_relocation(insn_buff, tmpl_size, ip,
skl_call_thunk_template, tmpl_size);

memcpy(*pprog, insn_buff, tmpl_size);

@@ -65,6 +65,7 @@ static int __init parse_no_stealacc(char *arg)

early_param("no-steal-acc", parse_no_stealacc);

static DEFINE_PER_CPU_READ_MOSTLY(bool, async_pf_enabled);
static DEFINE_PER_CPU_DECRYPTED(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
DEFINE_PER_CPU_DECRYPTED(struct kvm_steal_time, steal_time) __aligned(64) __visible;
static int has_steal_clock = 0;
@@ -244,7 +245,7 @@ noinstr u32 kvm_read_and_reset_apf_flags(void)
{
u32 flags = 0;

if (__this_cpu_read(apf_reason.enabled)) {
if (__this_cpu_read(async_pf_enabled)) {
flags = __this_cpu_read(apf_reason.flags);
__this_cpu_write(apf_reason.flags, 0);
}
@@ -295,7 +296,7 @@ DEFINE_IDTENTRY_SYSVEC(sysvec_kvm_asyncpf_interrupt)

inc_irq_stat(irq_hv_callback_count);

if (__this_cpu_read(apf_reason.enabled)) {
if (__this_cpu_read(async_pf_enabled)) {
token = __this_cpu_read(apf_reason.token);
kvm_async_pf_task_wake(token);
__this_cpu_write(apf_reason.token, 0);
@@ -362,7 +363,7 @@ static void kvm_guest_cpu_init(void)
wrmsrl(MSR_KVM_ASYNC_PF_INT, HYPERVISOR_CALLBACK_VECTOR);

wrmsrl(MSR_KVM_ASYNC_PF_EN, pa);
__this_cpu_write(apf_reason.enabled, 1);
__this_cpu_write(async_pf_enabled, true);
pr_debug("setup async PF for cpu %d\n", smp_processor_id());
}

@@ -383,11 +384,11 @@ static void kvm_guest_cpu_init(void)

static void kvm_pv_disable_apf(void)
{
if (!__this_cpu_read(apf_reason.enabled))
if (!__this_cpu_read(async_pf_enabled))
return;

wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
__this_cpu_write(apf_reason.enabled, 0);
__this_cpu_write(async_pf_enabled, false);

pr_debug("disable async PF for cpu %d\n", smp_processor_id());
}

@@ -189,15 +189,15 @@ static int kvm_cpuid_check_equal(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2
return 0;
}

static struct kvm_hypervisor_cpuid kvm_get_hypervisor_cpuid(struct kvm_vcpu *vcpu,
const char *sig)
static struct kvm_hypervisor_cpuid __kvm_get_hypervisor_cpuid(struct kvm_cpuid_entry2 *entries,
int nent, const char *sig)
{
struct kvm_hypervisor_cpuid cpuid = {};
struct kvm_cpuid_entry2 *entry;
u32 base;

for_each_possible_hypervisor_cpuid_base(base) {
entry = kvm_find_cpuid_entry(vcpu, base);
entry = cpuid_entry2_find(entries, nent, base, KVM_CPUID_INDEX_NOT_SIGNIFICANT);

if (entry) {
u32 signature[3];
@@ -217,22 +217,29 @@ static struct kvm_hypervisor_cpuid kvm_get_hypervisor_cpuid(struct kvm_vcp
return cpuid;
}

static struct kvm_cpuid_entry2 *__kvm_find_kvm_cpuid_features(struct kvm_vcpu *vcpu,
struct kvm_cpuid_entry2 *entries, int nent)
static struct kvm_hypervisor_cpuid kvm_get_hypervisor_cpuid(struct kvm_vcpu *vcpu,
const char *sig)
{
return __kvm_get_hypervisor_cpuid(vcpu->arch.cpuid_entries,
vcpu->arch.cpuid_nent, sig);
}

static struct kvm_cpuid_entry2 *__kvm_find_kvm_cpuid_features(struct kvm_cpuid_entry2 *entries,
int nent, u32 kvm_cpuid_base)
{
return cpuid_entry2_find(entries, nent, kvm_cpuid_base | KVM_CPUID_FEATURES,
KVM_CPUID_INDEX_NOT_SIGNIFICANT);
}

static struct kvm_cpuid_entry2 *kvm_find_kvm_cpuid_features(struct kvm_vcpu *vcpu)
{
u32 base = vcpu->arch.kvm_cpuid.base;

if (!base)
return NULL;

return cpuid_entry2_find(entries, nent, base | KVM_CPUID_FEATURES,
KVM_CPUID_INDEX_NOT_SIGNIFICANT);
}

static struct kvm_cpuid_entry2 *kvm_find_kvm_cpuid_features(struct kvm_vcpu *vcpu)
{
return __kvm_find_kvm_cpuid_features(vcpu, vcpu->arch.cpuid_entries,
vcpu->arch.cpuid_nent);
return __kvm_find_kvm_cpuid_features(vcpu->arch.cpuid_entries,
vcpu->arch.cpuid_nent, base);
}

void kvm_update_pv_runtime(struct kvm_vcpu *vcpu)
@@ -266,6 +273,7 @@ static void __kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu, struct kvm_cpuid_e
int nent)
{
struct kvm_cpuid_entry2 *best;
struct kvm_hypervisor_cpuid kvm_cpuid;

best = cpuid_entry2_find(entries, nent, 1, KVM_CPUID_INDEX_NOT_SIGNIFICANT);
if (best) {
@@ -292,10 +300,12 @@ static void __kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu, struct kvm_cpuid_e
cpuid_entry_has(best, X86_FEATURE_XSAVEC)))
best->ebx = xstate_required_size(vcpu->arch.xcr0, true);

best = __kvm_find_kvm_cpuid_features(vcpu, entries, nent);
if (kvm_hlt_in_guest(vcpu->kvm) && best &&
(best->eax & (1 << KVM_FEATURE_PV_UNHALT)))
best->eax &= ~(1 << KVM_FEATURE_PV_UNHALT);
kvm_cpuid = __kvm_get_hypervisor_cpuid(entries, nent, KVM_SIGNATURE);
if (kvm_cpuid.base) {
best = __kvm_find_kvm_cpuid_features(entries, nent, kvm_cpuid.base);
if (kvm_hlt_in_guest(vcpu->kvm) && best)
best->eax &= ~(1 << KVM_FEATURE_PV_UNHALT);
}

if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT)) {
best = cpuid_entry2_find(entries, nent, 0x1, KVM_CPUID_INDEX_NOT_SIGNIFICANT);

@@ -102,10 +102,12 @@ static const struct cpuid_reg reverse_cpuid[] = {
 */
static __always_inline void reverse_cpuid_check(unsigned int x86_leaf)
{
BUILD_BUG_ON(NR_CPUID_WORDS != NCAPINTS);
BUILD_BUG_ON(x86_leaf == CPUID_LNX_1);
BUILD_BUG_ON(x86_leaf == CPUID_LNX_2);
BUILD_BUG_ON(x86_leaf == CPUID_LNX_3);
BUILD_BUG_ON(x86_leaf == CPUID_LNX_4);
BUILD_BUG_ON(x86_leaf == CPUID_LNX_5);
BUILD_BUG_ON(x86_leaf >= ARRAY_SIZE(reverse_cpuid));
BUILD_BUG_ON(reverse_cpuid[x86_leaf].function == 0);
}

@@ -84,9 +84,10 @@ struct enc_region {
};

/* Called with the sev_bitmap_lock held, or on shutdown */
static int sev_flush_asids(int min_asid, int max_asid)
static int sev_flush_asids(unsigned int min_asid, unsigned int max_asid)
{
int ret, asid, error = 0;
int ret, error = 0;
unsigned int asid;

/* Check if there are any ASIDs to reclaim before performing a flush */
asid = find_next_bit(sev_reclaim_asid_bitmap, nr_asids, min_asid);
@@ -116,7 +117,7 @@ static inline bool is_mirroring_enc_context(struct kvm *kvm)
}

/* Must be called with the sev_bitmap_lock held */
static bool __sev_recycle_asids(int min_asid, int max_asid)
static bool __sev_recycle_asids(unsigned int min_asid, unsigned int max_asid)
{
if (sev_flush_asids(min_asid, max_asid))
return false;
@@ -143,8 +144,20 @@ static void sev_misc_cg_uncharge(struct kvm_sev_info *sev)

static int sev_asid_new(struct kvm_sev_info *sev)
{
int asid, min_asid, max_asid, ret;
/*
 * SEV-enabled guests must use asid from min_sev_asid to max_sev_asid.
 * SEV-ES-enabled guest can use from 1 to min_sev_asid - 1.
 * Note: min ASID can end up larger than the max if basic SEV support is
 * effectively disabled by disallowing use of ASIDs for SEV guests.
 */
unsigned int min_asid = sev->es_active ? 1 : min_sev_asid;
unsigned int max_asid = sev->es_active ? min_sev_asid - 1 : max_sev_asid;
unsigned int asid;
bool retry = true;
int ret;

if (min_asid > max_asid)
return -ENOTTY;

WARN_ON(sev->misc_cg);
sev->misc_cg = get_current_misc_cg();
@@ -157,12 +170,6 @@ static int sev_asid_new(struct kvm_sev_info *sev)

mutex_lock(&sev_bitmap_lock);

/*
 * SEV-enabled guests must use asid from min_sev_asid to max_sev_asid.
 * SEV-ES-enabled guest can use from 1 to min_sev_asid - 1.
 */
min_asid = sev->es_active ? 1 : min_sev_asid;
max_asid = sev->es_active ? min_sev_asid - 1 : max_sev_asid;
again:
asid = find_next_zero_bit(sev_asid_bitmap, max_asid + 1, min_asid);
if (asid > max_asid) {
@@ -179,7 +186,8 @@ again:

mutex_unlock(&sev_bitmap_lock);

return asid;
sev->asid = asid;
return 0;
e_uncharge:
sev_misc_cg_uncharge(sev);
put_misc_cg(sev->misc_cg);
@@ -187,7 +195,7 @@ e_uncharge:
return ret;
}

static int sev_get_asid(struct kvm *kvm)
static unsigned int sev_get_asid(struct kvm *kvm)
{
struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

@@ -247,21 +255,19 @@ static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
struct sev_platform_init_args init_args = {0};
int asid, ret;
int ret;

if (kvm->created_vcpus)
return -EINVAL;

ret = -EBUSY;
if (unlikely(sev->active))
return ret;
return -EINVAL;

sev->active = true;
sev->es_active = argp->id == KVM_SEV_ES_INIT;
asid = sev_asid_new(sev);
if (asid < 0)
ret = sev_asid_new(sev);
if (ret)
goto e_no_asid;
sev->asid = asid;

init_args.probe = false;
ret = sev_platform_init(&init_args);
@@ -287,8 +293,8 @@ e_no_asid:

static int sev_bind_asid(struct kvm *kvm, unsigned int handle, int *error)
{
unsigned int asid = sev_get_asid(kvm);
struct sev_data_activate activate;
int asid = sev_get_asid(kvm);
int ret;

/* activate ASID on the given handle */
@@ -2240,8 +2246,10 @@ void __init sev_hardware_setup(void)
goto out;
}

sev_asid_count = max_sev_asid - min_sev_asid + 1;
WARN_ON_ONCE(misc_cg_set_capacity(MISC_CG_RES_SEV, sev_asid_count));
if (min_sev_asid <= max_sev_asid) {
sev_asid_count = max_sev_asid - min_sev_asid + 1;
WARN_ON_ONCE(misc_cg_set_capacity(MISC_CG_RES_SEV, sev_asid_count));
}
sev_supported = true;

/* SEV-ES support requested? */
@@ -2272,7 +2280,9 @@ void __init sev_hardware_setup(void)
out:
if (boot_cpu_has(X86_FEATURE_SEV))
pr_info("SEV %s (ASIDs %u - %u)\n",
sev_supported ? "enabled" : "disabled",
sev_supported ? min_sev_asid <= max_sev_asid ? "enabled" :
"unusable" :
"disabled",
min_sev_asid, max_sev_asid);
if (boot_cpu_has(X86_FEATURE_SEV_ES))
pr_info("SEV-ES %s (ASIDs %u - %u)\n",
@@ -2320,7 +2330,7 @@ int sev_cpu_init(struct svm_cpu_data *sd)
 */
static void sev_flush_encrypted_page(struct kvm_vcpu *vcpu, void *va)
{
int asid = to_kvm_svm(vcpu->kvm)->sev_info.asid;
unsigned int asid = sev_get_asid(vcpu->kvm);

/*
 * Note! The address must be a kernel address, as regular page walk
@@ -2638,7 +2648,7 @@ void sev_es_unmap_ghcb(struct vcpu_svm *svm)
void pre_sev_run(struct vcpu_svm *svm, int cpu)
{
struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, cpu);
int asid = sev_get_asid(svm->vcpu.kvm);
unsigned int asid = sev_get_asid(svm->vcpu.kvm);

/* Assign the asid allocated with this SEV guest */
svm->asid = asid;

@@ -735,13 +735,13 @@ TRACE_EVENT(kvm_nested_intr_vmexit,
 * Tracepoint for nested #vmexit because of interrupt pending
 */
TRACE_EVENT(kvm_invlpga,
TP_PROTO(__u64 rip, int asid, u64 address),
TP_PROTO(__u64 rip, unsigned int asid, u64 address),
TP_ARGS(rip, asid, address),

TP_STRUCT__entry(
__field( __u64, rip )
__field( int, asid )
__field( __u64, address )
__field( __u64, rip )
__field( unsigned int, asid )
__field( __u64, address )
),

TP_fast_assign(
@@ -750,7 +750,7 @@ TRACE_EVENT(kvm_invlpga,
__entry->address = address;
),

TP_printk("rip: 0x%016llx asid: %d address: 0x%016llx",
TP_printk("rip: 0x%016llx asid: %u address: 0x%016llx",
__entry->rip, __entry->asid, __entry->address)
);

@@ -228,8 +228,11 @@ SYM_CODE_END(srso_return_thunk)
#else /* !CONFIG_MITIGATION_SRSO */
/* Dummy for the alternative in CALL_UNTRAIN_RET. */
SYM_CODE_START(srso_alias_untrain_ret)
RET
ANNOTATE_UNRET_SAFE
ret
int3
SYM_FUNC_END(srso_alias_untrain_ret)
__EXPORT_THUNK(srso_alias_untrain_ret)
#define JMP_SRSO_UNTRAIN_RET "ud2"
#endif /* CONFIG_MITIGATION_SRSO */

@@ -947,6 +947,38 @@ static void free_pfn_range(u64 paddr, unsigned long size)
memtype_free(paddr, paddr + size);
}

static int get_pat_info(struct vm_area_struct *vma, resource_size_t *paddr,
pgprot_t *pgprot)
{
unsigned long prot;

VM_WARN_ON_ONCE(!(vma->vm_flags & VM_PAT));

/*
 * We need the starting PFN and cachemode used for track_pfn_remap()
 * that covered the whole VMA. For most mappings, we can obtain that
 * information from the page tables. For COW mappings, we might now
 * suddenly have anon folios mapped and follow_phys() will fail.
 *
 * Fallback to using vma->vm_pgoff, see remap_pfn_range_notrack(), to
 * detect the PFN. If we need the cachemode as well, we're out of luck
 * for now and have to fail fork().
 */
if (!follow_phys(vma, vma->vm_start, 0, &prot, paddr)) {
if (pgprot)
*pgprot = __pgprot(prot);
return 0;
}
if (is_cow_mapping(vma->vm_flags)) {
if (pgprot)
return -EINVAL;
*paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
return 0;
}
WARN_ON_ONCE(1);
return -EINVAL;
}

/*
 * track_pfn_copy is called when vma that is covering the pfnmap gets
 * copied through copy_page_range().
@@ -957,20 +989,13 @@ static void free_pfn_range(u64 paddr, unsigned long size)
int track_pfn_copy(struct vm_area_struct *vma)
{
resource_size_t paddr;
unsigned long prot;
unsigned long vma_size = vma->vm_end - vma->vm_start;
pgprot_t pgprot;

if (vma->vm_flags & VM_PAT) {
/*
 * reserve the whole chunk covered by vma. We need the
 * starting address and protection from pte.
 */
if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
WARN_ON_ONCE(1);
if (get_pat_info(vma, &paddr, &pgprot))
return -EINVAL;
}
pgprot = __pgprot(prot);
/* reserve the whole chunk covered by vma. */
return reserve_pfn_range(paddr, vma_size, &pgprot, 1);
}

@@ -1045,7 +1070,6 @@ void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
unsigned long size, bool mm_wr_locked)
{
resource_size_t paddr;
unsigned long prot;

if (vma && !(vma->vm_flags & VM_PAT))
return;
@@ -1053,11 +1077,8 @@ void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
/* free the chunk starting from pfn or the whole chunk */
paddr = (resource_size_t)pfn << PAGE_SHIFT;
if (!paddr && !size) {
if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
WARN_ON_ONCE(1);
if (get_pat_info(vma, &paddr, NULL))
return;
}

size = vma->vm_end - vma->vm_start;
}
free_pfn_range(paddr, size);

@@ -480,7 +480,7 @@ static int emit_call(u8 **pprog, void *func, void *ip)
static int emit_rsb_call(u8 **pprog, void *func, void *ip)
{
OPTIMIZER_HIDE_VAR(func);
x86_call_depth_emit_accounting(pprog, func);
ip += x86_call_depth_emit_accounting(pprog, func, ip);
return emit_patch(pprog, func, ip, 0xE8);
}

@@ -1972,20 +1972,17 @@ populate_extable:

/* call */
case BPF_JMP | BPF_CALL: {
int offs;
u8 *ip = image + addrs[i - 1];

func = (u8 *) __bpf_call_base + imm32;
if (tail_call_reachable) {
RESTORE_TAIL_CALL_CNT(bpf_prog->aux->stack_depth);
if (!imm32)
return -EINVAL;
offs = 7 + x86_call_depth_emit_accounting(&prog, func);
} else {
if (!imm32)
return -EINVAL;
offs = x86_call_depth_emit_accounting(&prog, func);
ip += 7;
}
if (emit_call(&prog, func, image + addrs[i - 1] + offs))
if (!imm32)
return -EINVAL;
ip += x86_call_depth_emit_accounting(&prog, func, ip);
if (emit_call(&prog, func, ip))
return -EINVAL;
break;
}
@@ -2835,7 +2832,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *rw_im
 * Direct-call fentry stub, as such it needs accounting for the
 * __fentry__ call.
 */
x86_call_depth_emit_accounting(&prog, NULL);
x86_call_depth_emit_accounting(&prog, NULL, image);
}
EMIT1(0x55); /* push rbp */
EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */

block/bdev.c
@@ -583,9 +583,6 @@ static void bd_finish_claiming(struct block_device *bdev, void *holder,
mutex_unlock(&bdev->bd_holder_lock);
bd_clear_claiming(whole, holder);
mutex_unlock(&bdev_lock);

if (hops && hops->get_holder)
hops->get_holder(holder);
}

/**
@@ -608,7 +605,6 @@ EXPORT_SYMBOL(bd_abort_claiming);
static void bd_end_claim(struct block_device *bdev, void *holder)
{
struct block_device *whole = bdev_whole(bdev);
const struct blk_holder_ops *hops = bdev->bd_holder_ops;
bool unblock = false;

/*
@@ -631,9 +627,6 @@ static void bd_end_claim(struct block_device *bdev, void *holder)
whole->bd_holder = NULL;
mutex_unlock(&bdev_lock);

if (hops && hops->put_holder)
hops->put_holder(holder);

/*
 * If this was the last claim, remove holder link and unblock evpoll if
 * it was a write holder.
@@ -776,17 +769,17 @@ void blkdev_put_no_open(struct block_device *bdev)

static bool bdev_writes_blocked(struct block_device *bdev)
{
return bdev->bd_writers == -1;
return bdev->bd_writers < 0;
}

static void bdev_block_writes(struct block_device *bdev)
{
bdev->bd_writers = -1;
bdev->bd_writers--;
}

static void bdev_unblock_writes(struct block_device *bdev)
{
bdev->bd_writers = 0;
bdev->bd_writers++;
}

static bool bdev_may_open(struct block_device *bdev, blk_mode_t mode)
@@ -813,6 +806,11 @@ static void bdev_claim_write_access(struct block_device *bdev, blk_mode_t mode)
bdev->bd_writers++;
}

static inline bool bdev_unclaimed(const struct file *bdev_file)
{
return bdev_file->private_data == BDEV_I(bdev_file->f_mapping->host);
}

static void bdev_yield_write_access(struct file *bdev_file)
{
struct block_device *bdev;
@@ -820,14 +818,15 @@ static void bdev_yield_write_access(struct file *bdev_file)
if (bdev_allow_write_mounted)
return;

if (bdev_unclaimed(bdev_file))
return;

bdev = file_bdev(bdev_file);
/* Yield exclusive or shared write access. */
if (bdev_file->f_mode & FMODE_WRITE) {
if (bdev_writes_blocked(bdev))
bdev_unblock_writes(bdev);
else
bdev->bd_writers--;
}

if (bdev_file->f_mode & FMODE_WRITE_RESTRICTED)
bdev_unblock_writes(bdev);
else if (bdev_file->f_mode & FMODE_WRITE)
bdev->bd_writers--;
}

/**
@@ -907,6 +906,8 @@ int bdev_open(struct block_device *bdev, blk_mode_t mode, void *holder,
bdev_file->f_mode |= FMODE_BUF_RASYNC | FMODE_CAN_ODIRECT;
if (bdev_nowait(bdev))
bdev_file->f_mode |= FMODE_NOWAIT;
if (mode & BLK_OPEN_RESTRICT_WRITES)
bdev_file->f_mode |= FMODE_WRITE_RESTRICTED;
bdev_file->f_mapping = bdev->bd_inode->i_mapping;
bdev_file->f_wb_err = filemap_sample_wb_err(bdev_file->f_mapping);
bdev_file->private_data = holder;
@@ -1012,6 +1013,20 @@ struct file *bdev_file_open_by_path(const char *path, blk_mode_t mode,
}
EXPORT_SYMBOL(bdev_file_open_by_path);

static inline void bd_yield_claim(struct file *bdev_file)
{
struct block_device *bdev = file_bdev(bdev_file);
void *holder = bdev_file->private_data;

lockdep_assert_held(&bdev->bd_disk->open_mutex);

if (WARN_ON_ONCE(IS_ERR_OR_NULL(holder)))
return;

if (!bdev_unclaimed(bdev_file))
bd_end_claim(bdev, holder);
}

void bdev_release(struct file *bdev_file)
{
struct block_device *bdev = file_bdev(bdev_file);
@@ -1036,7 +1051,7 @@ void bdev_release(struct file *bdev_file)
bdev_yield_write_access(bdev_file);

if (holder)
bd_end_claim(bdev, holder);
bd_yield_claim(bdev_file);

/*
 * Trigger event checking and tell drivers to flush MEDIA_CHANGE
@@ -1056,6 +1071,39 @@ put_no_open:
blkdev_put_no_open(bdev);
}

/**
 * bdev_fput - yield claim to the block device and put the file
 * @bdev_file: open block device
 *
 * Yield claim on the block device and put the file. Ensure that the
 * block device can be reclaimed before the file is closed which is a
 * deferred operation.
 */
void bdev_fput(struct file *bdev_file)
{
if (WARN_ON_ONCE(bdev_file->f_op != &def_blk_fops))
return;

if (bdev_file->private_data) {
struct block_device *bdev = file_bdev(bdev_file);
struct gendisk *disk = bdev->bd_disk;

mutex_lock(&disk->open_mutex);
bdev_yield_write_access(bdev_file);
bd_yield_claim(bdev_file);
/*
 * Tell release we already gave up our hold on the
 * device and if write restrictions are available that
 * we already gave up write access to the device.
 */
bdev_file->private_data = BDEV_I(bdev_file->f_mapping->host);
mutex_unlock(&disk->open_mutex);
}

fput(bdev_file);
}
EXPORT_SYMBOL(bdev_fput);

/**
 * lookup_bdev() - Look up a struct block_device by name.
 * @pathname: Name of the block device in the filesystem.

@@ -96,7 +96,7 @@ static int blk_ioctl_discard(struct block_device *bdev, blk_mode_t mode,
unsigned long arg)
{
uint64_t range[2];
uint64_t start, len;
uint64_t start, len, end;
struct inode *inode = bdev->bd_inode;
int err;

@@ -117,7 +117,8 @@ static int blk_ioctl_discard(struct block_device *bdev, blk_mode_t mode,
if (len & 511)
return -EINVAL;

if (start + len > bdev_nr_bytes(bdev))
if (check_add_overflow(start, len, &end) ||
    end > bdev_nr_bytes(bdev))
return -EINVAL;

filemap_invalidate_lock(inode->i_mapping);

@@ -662,14 +662,15 @@ static int acpi_thermal_register_thermal_zone(struct acpi_thermal *tz,
{
int result;

tz->thermal_zone = thermal_zone_device_register_with_trips("acpitz",
trip_table,
trip_count,
tz,
&acpi_thermal_zone_ops,
NULL,
passive_delay,
tz->polling_frequency * 100);
if (trip_count)
tz->thermal_zone = thermal_zone_device_register_with_trips(
"acpitz", trip_table, trip_count, tz,
&acpi_thermal_zone_ops, NULL, passive_delay,
tz->polling_frequency * 100);
else
tz->thermal_zone = thermal_tripless_zone_device_register(
"acpitz", tz, &acpi_thermal_zone_ops, NULL);

if (IS_ERR(tz->thermal_zone))
return PTR_ERR(tz->thermal_zone);

@@ -901,11 +902,8 @@ static int acpi_thermal_add(struct acpi_device *device)
trip++;
}

if (trip == trip_table) {
if (trip == trip_table)
pr_warn(FW_BUG "No valid trip points!\n");
result = -ENODEV;
goto free_memory;
}

result = acpi_thermal_register_thermal_zone(tz, trip_table,
trip - trip_table,

@@ -30,7 +30,6 @@
#define ST_AHCI_OOBR_CIMAX_SHIFT	0

struct st_ahci_drv_data {
struct platform_device *ahci;
struct reset_control *pwr;
struct reset_control *sw_rst;
struct reset_control *pwr_rst;

@@ -1371,9 +1371,6 @@ static struct pci_driver pata_macio_pci_driver = {
.suspend	= pata_macio_pci_suspend,
.resume		= pata_macio_pci_resume,
#endif
.driver = {
.owner		= THIS_MODULE,
},
};
MODULE_DEVICE_TABLE(pci, pata_macio_pci_match);

@@ -200,7 +200,10 @@ int gemini_sata_start_bridge(struct sata_gemini *sg, unsigned int bridge)
pclk = sg->sata0_pclk;
else
pclk = sg->sata1_pclk;
clk_enable(pclk);
ret = clk_enable(pclk);
if (ret)
return ret;

msleep(10);

/* Do not keep clocking a bridge that is not online */

@@ -787,37 +787,6 @@ static const struct ata_port_info mv_port_info[] = {
},
};

static const struct pci_device_id mv_pci_tbl[] = {
{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
/* RocketRAID 1720/174x have different identifiers */
{ PCI_VDEVICE(TTI, 0x1720), chip_6042 },
{ PCI_VDEVICE(TTI, 0x1740), chip_6042 },
{ PCI_VDEVICE(TTI, 0x1742), chip_6042 },

{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

/* Adaptec 1430SA */
{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

/* Marvell 7042 support */
{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

/* Highpoint RocketRAID PCIe series */
{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

{ }			/* terminate list */
};

static const struct mv_hw_ops mv5xxx_ops = {
.phy_errata		= mv5_phy_errata,
.enable_leds		= mv5_enable_leds,
@@ -4303,6 +4272,36 @@ static int mv_pci_init_one(struct pci_dev *pdev,
static int mv_pci_device_resume(struct pci_dev *pdev);
#endif

static const struct pci_device_id mv_pci_tbl[] = {
{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
/* RocketRAID 1720/174x have different identifiers */
{ PCI_VDEVICE(TTI, 0x1720), chip_6042 },
{ PCI_VDEVICE(TTI, 0x1740), chip_6042 },
{ PCI_VDEVICE(TTI, 0x1742), chip_6042 },

{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

/* Adaptec 1430SA */
{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

/* Marvell 7042 support */
{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

/* Highpoint RocketRAID PCIe series */
{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

{ }			/* terminate list */
};

static struct pci_driver mv_pci_driver = {
.name			= DRV_NAME,
@@ -4315,6 +4314,7 @@ static struct pci_driver mv_pci_driver = {
#endif

};
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);

/**
 * mv_print_info - Dump key info to kernel log for perusal.
@@ -4487,7 +4487,6 @@ static void __exit mv_exit(void)
MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:" DRV_NAME);

@@ -957,8 +957,7 @@ static void pdc20621_get_from_dimm(struct ata_host *host, void *psource,

offset -= (idx * window_size);
idx++;
dist = ((long) (window_size - (offset + size))) >= 0 ? size :
(long) (window_size - offset);
dist = min(size, window_size - offset);
memcpy_fromio(psource, dimm_mmio + offset / 4, dist);

psource += dist;
@@ -1005,8 +1004,7 @@ static void pdc20621_put_to_dimm(struct ata_host *host, void *psource,
readl(mmio + PDC_DIMM_WINDOW_CTLR);
offset -= (idx * window_size);
idx++;
dist = ((long)(s32)(window_size - (offset + size))) >= 0 ? size :
(long) (window_size - offset);
dist = min(size, window_size - offset);
memcpy_toio(dimm_mmio + offset / 4, psource, dist);
writel(0x01, mmio + PDC_GENERAL_CTLR);
readl(mmio + PDC_GENERAL_CTLR);

@@ -44,6 +44,7 @@ static bool fw_devlink_is_permissive(void);
static void __fw_devlink_link_to_consumers(struct device *dev);
static bool fw_devlink_drv_reg_done;
static bool fw_devlink_best_effort;
static struct workqueue_struct *device_link_wq;

/**
 * __fwnode_link_add - Create a link between two fwnode_handles.
@@ -533,12 +534,26 @@ static void devlink_dev_release(struct device *dev)
/*
 * It may take a while to complete this work because of the SRCU
 * synchronization in device_link_release_fn() and if the consumer or
 * supplier devices get deleted when it runs, so put it into the "long"
 * workqueue.
 * supplier devices get deleted when it runs, so put it into the
 * dedicated workqueue.
 */
queue_work(system_long_wq, &link->rm_work);
queue_work(device_link_wq, &link->rm_work);
}

/**
 * device_link_wait_removal - Wait for ongoing devlink removal jobs to terminate
 */
void device_link_wait_removal(void)
{
/*
 * devlink removal jobs are queued in the dedicated work queue.
 * To be sure that all removal jobs are terminated, ensure that any
 * scheduled work has run to completion.
 */
flush_workqueue(device_link_wq);
}
EXPORT_SYMBOL_GPL(device_link_wait_removal);

static struct class devlink_class = {
.name = "devlink",
.dev_groups = devlink_groups,
@@ -4164,9 +4179,14 @@ int __init devices_init(void)
sysfs_dev_char_kobj = kobject_create_and_add("char", dev_kobj);
if (!sysfs_dev_char_kobj)
goto char_kobj_err;
device_link_wq = alloc_workqueue("device_link_wq", 0, 0);
if (!device_link_wq)
goto wq_err;

return 0;

wq_err:
kobject_put(sysfs_dev_char_kobj);
char_kobj_err:
kobject_put(sysfs_dev_block_kobj);
block_kobj_err:

@@ -112,7 +112,7 @@ static int regcache_maple_drop(struct regmap *map, unsigned int min,
unsigned long *entry, *lower, *upper;
unsigned long lower_index, lower_last;
unsigned long upper_index, upper_last;
int ret;
int ret = 0;

lower = NULL;
upper = NULL;
@@ -145,7 +145,7 @@ static int regcache_maple_drop(struct regmap *map, unsigned int min,
upper_index = max + 1;
upper_last = mas.last;

upper = kmemdup(&entry[max + 1],
upper = kmemdup(&entry[max - mas.index + 1],
((mas.last - max) *
sizeof(unsigned long)),
map->alloc_flags);
@@ -244,7 +244,7 @@ static int regcache_maple_sync(struct regmap *map, unsigned int min,
unsigned long lmin = min;
unsigned long lmax = max;
unsigned int r, v, sync_start;
int ret;
int ret = 0;
bool sync_needed = false;

map->cache_bypass = true;

@@ -1965,10 +1965,10 @@ static int null_add_dev(struct nullb_device *dev)

out_ida_free:
ida_free(&nullb_indexes, nullb->index);
out_cleanup_zone:
null_free_zoned_dev(dev);
out_cleanup_disk:
put_disk(nullb->disk);
out_cleanup_zone:
null_free_zoned_dev(dev);
out_cleanup_tags:
if (nullb->tag_set == &nullb->__tag_set)
blk_mq_free_tag_set(nullb->tag_set);

@@ -826,11 +826,15 @@ EXPORT_SYMBOL_GPL(qca_uart_setup);

int qca_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
{
bdaddr_t bdaddr_swapped;
struct sk_buff *skb;
int err;

skb = __hci_cmd_sync_ev(hdev, EDL_WRITE_BD_ADDR_OPCODE, 6, bdaddr,
HCI_EV_VENDOR, HCI_INIT_TIMEOUT);
baswap(&bdaddr_swapped, bdaddr);

skb = __hci_cmd_sync_ev(hdev, EDL_WRITE_BD_ADDR_OPCODE, 6,
&bdaddr_swapped, HCI_EV_VENDOR,
HCI_INIT_TIMEOUT);
if (IS_ERR(skb)) {
err = PTR_ERR(skb);
bt_dev_err(hdev, "QCA Change address cmd failed (%d)", err);

@@ -7,7 +7,6 @@
 *
 *  Copyright (C) 2007 Texas Instruments, Inc.
 *  Copyright (c) 2010, 2012, 2018 The Linux Foundation. All rights reserved.
 *  Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 *  Acknowledgements:
 *  This file is based on hci_ll.c, which was...
@@ -226,6 +225,7 @@ struct qca_serdev {
struct qca_power *bt_power;
u32 init_speed;
u32 oper_speed;
bool bdaddr_property_broken;
const char *firmware_name;
};

@@ -1843,6 +1843,7 @@ static int qca_setup(struct hci_uart *hu)
const char *firmware_name = qca_get_firmware_name(hu);
int ret;
struct qca_btsoc_version ver;
struct qca_serdev *qcadev;
const char *soc_name;

ret = qca_check_speeds(hu);
@@ -1904,16 +1905,11 @@ retry:
case QCA_WCN6750:
case QCA_WCN6855:
case QCA_WCN7850:
set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);

/* Set BDA quirk bit for reading BDA value from fwnode property
 * only if that property exist in DT.
 */
if (fwnode_property_present(dev_fwnode(hdev->dev.parent), "local-bd-address")) {
set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
bt_dev_info(hdev, "setting quirk bit to read BDA from fwnode later");
} else {
bt_dev_dbg(hdev, "local-bd-address` is not present in the devicetree so not setting quirk bit for BDA");
}
qcadev = serdev_device_get_drvdata(hu->serdev);
if (qcadev->bdaddr_property_broken)
set_bit(HCI_QUIRK_BDADDR_PROPERTY_BROKEN, &hdev->quirks);

hci_set_aosp_capable(hdev);

@@ -2295,6 +2291,9 @@ static int qca_serdev_probe(struct serdev_device *serdev)
if (!qcadev->oper_speed)
BT_DBG("UART will pick default operating speed");

qcadev->bdaddr_property_broken = device_property_read_bool(&serdev->dev,
"qcom,local-bd-address-broken");

if (data)
qcadev->btsoc_type = data->soc_type;
else

@@ -2060,6 +2060,8 @@ static void bus_reset_work(struct work_struct *work)

ohci->generation = generation;
reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS)
reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);

if (ohci->quirks & QUIRK_RESET_PACKET)
ohci->request_generation = generation;
@@ -2125,12 +2127,14 @@ static irqreturn_t irq_handler(int irq, void *data)
return IRQ_NONE;

/*
 * busReset and postedWriteErr must not be cleared yet
 * busReset and postedWriteErr events must not be cleared yet
 * (OHCI 1.1 clauses 7.2.3.2 and 13.2.8.1)
 */
reg_write(ohci, OHCI1394_IntEventClear,
event & ~(OHCI1394_busReset | OHCI1394_postedWriteErr));
log_irqs(ohci, event);
if (event & OHCI1394_busReset)
reg_write(ohci, OHCI1394_IntMaskClear, OHCI1394_busReset);

if (event & OHCI1394_selfIDComplete)
queue_work(selfid_workqueue, &ohci->bus_reset_work);

@@ -728,6 +728,25 @@ static u32 line_event_id(int level)
GPIO_V2_LINE_EVENT_FALLING_EDGE;
}

static inline char *make_irq_label(const char *orig)
{
char *new;

if (!orig)
return NULL;

new = kstrdup_and_replace(orig, '/', ':', GFP_KERNEL);
if (!new)
return ERR_PTR(-ENOMEM);

return new;
}

static inline void free_irq_label(const char *label)
{
kfree(label);
}

#ifdef CONFIG_HTE

static enum hte_return process_hw_ts_thread(void *p)
@@ -1015,6 +1034,7 @@ static int debounce_setup(struct line *line, unsigned int debounce_period_us)
{
unsigned long irqflags;
int ret, level, irq;
char *label;

/* try hardware */
ret = gpiod_set_debounce(line->desc, debounce_period_us);
@@ -1037,11 +1057,17 @@ static int debounce_setup(struct line *line, unsigned int debounce_period_us)
if (irq < 0)
return -ENXIO;

label = make_irq_label(line->req->label);
if (IS_ERR(label))
return -ENOMEM;

irqflags = IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING;
ret = request_irq(irq, debounce_irq_handler, irqflags,
line->req->label, line);
if (ret)
label, line);
if (ret) {
free_irq_label(label);
return ret;
}
line->irq = irq;
} else {
ret = hte_edge_setup(line, GPIO_V2_LINE_FLAG_EDGE_BOTH);
@@ -1083,16 +1109,6 @@ static u32 gpio_v2_line_config_debounce_period(struct gpio_v2_line_config *lc,
return 0;
}

static inline char *make_irq_label(const char *orig)
{
return kstrdup_and_replace(orig, '/', ':', GFP_KERNEL);
}

static inline void free_irq_label(const char *label)
{
kfree(label);
}

static void edge_detector_stop(struct line *line)
{
if (line->irq) {
@@ -1158,8 +1174,8 @@ static int edge_detector_setup(struct line *line,
irqflags |= IRQF_ONESHOT;

label = make_irq_label(line->req->label);
if (!label)
return -ENOMEM;
if (IS_ERR(label))
return PTR_ERR(label);

/* Request a thread to read the events */
ret = request_threaded_irq(irq, edge_irq_handler, edge_irq_thread,
@@ -2217,8 +2233,8 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
goto out_free_le;

label = make_irq_label(le->label);
if (!label) {
ret = -ENOMEM;
if (IS_ERR(label)) {
ret = PTR_ERR(label);
goto out_free_le;
}

@@ -1175,6 +1175,9 @@ struct gpio_device *gpio_device_find(const void *data,

list_for_each_entry_srcu(gdev, &gpio_devices, list,
srcu_read_lock_held(&gpio_devices_srcu)) {
if (!device_is_registered(&gdev->dev))
continue;

guard(srcu)(&gdev->srcu);

gc = srcu_dereference(gdev->chip, &gdev->srcu);

@@ -52,7 +52,7 @@
 * @adapter: I2C adapter for the DDC bus
 * @offset: register offset
 * @buffer: buffer for return data
 * @size: sizo of the buffer
 * @size: size of the buffer
 *
 * Reads @size bytes from the DP dual mode adaptor registers
 * starting at @offset.
@@ -116,7 +116,7 @@ EXPORT_SYMBOL(drm_dp_dual_mode_read);
 * @adapter: I2C adapter for the DDC bus
 * @offset: register offset
 * @buffer: buffer for write data
 * @size: sizo of the buffer
 * @size: size of the buffer
 *
 * Writes @size bytes to the DP dual mode adaptor registers
 * starting at @offset.

@@ -582,7 +582,12 @@ int drm_gem_map_attach(struct dma_buf *dma_buf,
{
struct drm_gem_object *obj = dma_buf->priv;

if (!obj->funcs->get_sg_table)
/*
 * drm_gem_map_dma_buf() requires obj->get_sg_table(), but drivers
 * that implement their own ->map_dma_buf() do not.
 */
if (dma_buf->ops->map_dma_buf == drm_gem_map_dma_buf &&
    !obj->funcs->get_sg_table)
return -ENOSYS;

return drm_gem_pin(obj);

@@ -118,6 +118,7 @@ gt-y += \
gt/intel_ggtt_fencing.o \
gt/intel_gt.o \
gt/intel_gt_buffer_pool.o \
gt/intel_gt_ccs_mode.o \
gt/intel_gt_clock_utils.o \
gt/intel_gt_debugfs.o \
gt/intel_gt_engines_debugfs.o \

@@ -2709,15 +2709,6 @@ static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
 */
intel_de_write(dev_priv, PIPESRC(pipe),
PIPESRC_WIDTH(width - 1) | PIPESRC_HEIGHT(height - 1));

if (!crtc_state->enable_psr2_su_region_et)
return;

width = drm_rect_width(&crtc_state->psr2_su_area);
height = drm_rect_height(&crtc_state->psr2_su_area);

intel_de_write(dev_priv, PIPE_SRCSZ_ERLY_TPT(pipe),
PIPESRC_WIDTH(width - 1) | PIPESRC_HEIGHT(height - 1));
}

static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)

@@ -47,6 +47,7 @@ struct drm_printer;
#define HAS_DPT(i915)			(DISPLAY_VER(i915) >= 13)
#define HAS_DSB(i915)			(DISPLAY_INFO(i915)->has_dsb)
#define HAS_DSC(__i915)			(DISPLAY_RUNTIME_INFO(__i915)->has_dsc)
#define HAS_DSC_MST(__i915)		(DISPLAY_VER(__i915) >= 12 && HAS_DSC(__i915))
#define HAS_FBC(i915)			(DISPLAY_RUNTIME_INFO(i915)->fbc_mask != 0)
#define HAS_FPGA_DBG_UNCLAIMED(i915)	(DISPLAY_INFO(i915)->has_fpga_dbg)
#define HAS_FW_BLC(i915)		(DISPLAY_VER(i915) >= 3)

@@ -1423,6 +1423,8 @@ struct intel_crtc_state {

u32 psr2_man_track_ctl;

u32 pipe_srcsz_early_tpt;

struct drm_rect psr2_su_area;

/* Variable Refresh Rate state */

@@ -499,7 +499,7 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp)
/* The values must be in increasing order */
static const int mtl_rates[] = {
162000, 216000, 243000, 270000, 324000, 432000, 540000, 675000,
810000, 1000000, 1350000, 2000000,
810000, 1000000, 2000000,
};
static const int icl_rates[] = {
162000, 216000, 270000, 324000, 432000, 540000, 648000, 810000,
@@ -1422,7 +1422,8 @@ static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp,
if (DISPLAY_VER(dev_priv) >= 12)
return true;

if (DISPLAY_VER(dev_priv) == 11 && encoder->port != PORT_A)
if (DISPLAY_VER(dev_priv) == 11 && encoder->port != PORT_A &&
    !intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST))
return true;

return false;
@@ -1917,8 +1918,9 @@ icl_dsc_compute_link_config(struct intel_dp *intel_dp,
dsc_max_bpp = min(dsc_max_bpp, pipe_bpp - 1);

for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp); i++) {
if (valid_dsc_bpp[i] < dsc_min_bpp ||
    valid_dsc_bpp[i] > dsc_max_bpp)
if (valid_dsc_bpp[i] < dsc_min_bpp)
continue;
if (valid_dsc_bpp[i] > dsc_max_bpp)
break;

ret = dsc_compute_link_config(intel_dp,
@@ -6557,6 +6559,7 @@ intel_dp_init_connector(struct intel_digital_port *dig_port,
intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
else
intel_connector->get_hw_state = intel_connector_get_hw_state;
intel_connector->sync_state = intel_dp_connector_sync_state;

if (!intel_edp_init_connector(intel_dp, intel_connector)) {
intel_dp_aux_fini(intel_dp);

@@ -1355,7 +1355,7 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector,
return 0;
}

if (DISPLAY_VER(dev_priv) >= 10 &&
if (HAS_DSC_MST(dev_priv) &&
    drm_dp_sink_supports_dsc(intel_connector->dp.dsc_dpcd)) {
/*
 * TBD pass the connector BPC,

@@ -1994,6 +1994,7 @@ static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp)

void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
struct intel_encoder *encoder;
@@ -2013,6 +2014,12 @@ void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_st

intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(cpu_transcoder),
crtc_state->psr2_man_track_ctl);

if (!crtc_state->enable_psr2_su_region_et)
return;

intel_de_write(dev_priv, PIPE_SRCSZ_ERLY_TPT(crtc->pipe),
crtc_state->pipe_srcsz_early_tpt);
}

static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
@@ -2051,6 +2058,20 @@ exit:
crtc_state->psr2_man_track_ctl = val;
}

static u32 psr2_pipe_srcsz_early_tpt_calc(struct intel_crtc_state *crtc_state,
bool full_update)
{
int width, height;

if (!crtc_state->enable_psr2_su_region_et || full_update)
return 0;

width = drm_rect_width(&crtc_state->psr2_su_area);
height = drm_rect_height(&crtc_state->psr2_su_area);

return PIPESRC_WIDTH(width - 1) | PIPESRC_HEIGHT(height - 1);
}

static void clip_area_update(struct drm_rect *overlap_damage_area,
struct drm_rect *damage_area,
struct drm_rect *pipe_src)
@@ -2095,21 +2116,36 @@ static void intel_psr2_sel_fetch_pipe_alignment(struct intel_crtc_state *crtc_st
 * cursor fully when cursor is in SU area.
 */
static void
intel_psr2_sel_fetch_et_alignment(struct intel_crtc_state *crtc_state,
struct intel_plane_state *cursor_state)
intel_psr2_sel_fetch_et_alignment(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct drm_rect inter;
struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
struct intel_plane_state *new_plane_state;
struct intel_plane *plane;
int i;

if (!crtc_state->enable_psr2_su_region_et ||
    !cursor_state->uapi.visible)
if (!crtc_state->enable_psr2_su_region_et)
return;

inter = crtc_state->psr2_su_area;
if (!drm_rect_intersect(&inter, &cursor_state->uapi.dst))
return;
for_each_new_intel_plane_in_state(state, plane, new_plane_state, i) {
struct drm_rect inter;

clip_area_update(&crtc_state->psr2_su_area, &cursor_state->uapi.dst,
&crtc_state->pipe_src);
if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc)
continue;

if (plane->id != PLANE_CURSOR)
continue;

if (!new_plane_state->uapi.visible)
continue;

inter = crtc_state->psr2_su_area;
if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst))
continue;

clip_area_update(&crtc_state->psr2_su_area, &new_plane_state->uapi.dst,
&crtc_state->pipe_src);
}
}

/*
@@ -2152,8 +2188,7 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
struct intel_plane_state *new_plane_state, *old_plane_state,
*cursor_plane_state = NULL;
struct intel_plane_state *new_plane_state, *old_plane_state;
struct intel_plane *plane;
bool full_update = false;
int i, ret;
@@ -2238,13 +2273,6 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
damaged_area.x2 += new_plane_state->uapi.dst.x1 - src.x1;

clip_area_update(&crtc_state->psr2_su_area, &damaged_area, &crtc_state->pipe_src);

/*
 * Cursor plane new state is stored to adjust su area to cover
 * cursor are fully.
 */
if (plane->id == PLANE_CURSOR)
cursor_plane_state = new_plane_state;
}

/*
@@ -2273,9 +2301,13 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
if (ret)
return ret;

/* Adjust su area to cover cursor fully as necessary */
if (cursor_plane_state)
intel_psr2_sel_fetch_et_alignment(crtc_state, cursor_plane_state);
/*
 * Adjust su area to cover cursor fully as necessary (early
 * transport). This needs to be done after
 * drm_atomic_add_affected_planes to ensure visible cursor is added into
 * affected planes even when cursor is not updated by itself.
 */
intel_psr2_sel_fetch_et_alignment(state, crtc);

intel_psr2_sel_fetch_pipe_alignment(crtc_state);

@@ -2338,6 +2370,8 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,

skip_sel_fetch_set_loop:
psr2_man_trk_ctl_calc(crtc_state, full_update);
crtc_state->pipe_srcsz_early_tpt =
psr2_pipe_srcsz_early_tpt_calc(crtc_state, full_update);
return 0;
}

@@ -961,6 +961,9 @@ static int gen8_init_rsvd(struct i915_address_space *vm)
struct i915_vma *vma;
int ret;

if (!intel_gt_needs_wa_16018031267(vm->gt))
return 0;

/* The memory will be used only by GPU. */
obj = i915_gem_object_create_lmem(i915, PAGE_SIZE,
I915_BO_ALLOC_VOLATILE |

Some files were not shown because too many files have changed in this diff.